Diffstat (limited to 'drivers/gpu/drm')
1598 files changed, 77968 insertions, 18640 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 4b2f7d794275..0e1c668b46d2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -6,7 +6,7 @@ CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE # Unconditionally enable W=1 warnings locally -# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn +# --- begin copy-paste W=1 warnings from scripts/Makefile.warn subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter subdir-ccflags-y += $(call cc-option, -Wrestrict) subdir-ccflags-y += -Wmissing-format-attribute @@ -41,6 +41,7 @@ drm-y := \ drm_bridge.o \ drm_cache.o \ drm_color_mgmt.o \ + drm_colorop.o \ drm_connector.o \ drm_crtc.o \ drm_displayid.o \ @@ -76,7 +77,8 @@ drm-y := \ drm-$(CONFIG_DRM_CLIENT) += \ drm_client.o \ drm_client_event.o \ - drm_client_modeset.o + drm_client_modeset.o \ + drm_client_sysrq.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o @@ -150,7 +152,8 @@ drm_kms_helper-y := \ drm_plane_helper.o \ drm_probe_helper.o \ drm_self_refresh_helper.o \ - drm_simple_kms_helper.o + drm_simple_kms_helper.o \ + drm_vblank_helper.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o @@ -245,7 +248,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \ quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) cmd_hdrtest = \ $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \ - PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \ + PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \ touch $@ $(obj)/%.hdrtest: $(src)/%.h FORCE diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c index 54cde090c3f4..4554cf75565e 100644 --- a/drivers/gpu/drm/adp/adp_drv.c +++ b/drivers/gpu/drm/adp/adp_drv.c @@ -16,6 +16,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 1acfed2f92ef..7f515be5185d 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -43,14 +43,16 @@ config DRM_AMDGPU_SI bool "Enable amdgpu support for SI parts" depends on DRM_AMDGPU help - Choose this option if you want to enable experimental support + Choose this option if you want to enable support for SI (Southern Islands) asics. - SI is already supported in radeon. Experimental support for SI - in amdgpu will be disabled by default and is still provided by - radeon. Use module options to override this: + SI (Southern Islands) are first generation GCN GPUs, + supported by both drivers: radeon (old) and amdgpu (new). + By default, SI dedicated GPUs are supported by amdgpu. - radeon.si_support=0 amdgpu.si_support=1 + Use module options to override this: + To use radeon for SI, + radeon.si_support=1 amdgpu.si_support=0 config DRM_AMDGPU_CIK bool "Enable amdgpu support for CIK parts" @@ -59,11 +61,17 @@ config DRM_AMDGPU_CIK Choose this option if you want to enable support for CIK (Sea Islands) asics. - CIK is already supported in radeon. Support for CIK in amdgpu - will be disabled by default and is still provided by radeon. 
- Use module options to override this: + CIK (Sea Islands) are second generation GCN GPUs, + supported by both drivers: radeon (old) and amdgpu (new). + By default, + CIK dedicated GPUs are supported by amdgpu + CIK APUs are supported by radeon + Use module options to override this: + To use amdgpu for CIK, radeon.cik_support=0 amdgpu.cik_support=1 + To use radeon for CIK, + radeon.cik_support=1 amdgpu.cik_support=0 config DRM_AMDGPU_USERPTR bool "Always enable userptr write support" diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 64e7acff8f18..c88760fb52ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -37,7 +37,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_DISPLAY_PATH)/modules/inc \ -I$(FULL_AMD_DISPLAY_PATH)/dc \ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \ - -I$(FULL_AMD_PATH)/amdkfd + -I$(FULL_AMD_PATH)/amdkfd \ + -I$(FULL_AMD_PATH)/ras/ras_mgr # Locally disable W=1 warnings enabled in drm subsystem Makefile subdir-ccflags-y += -Wno-override-init @@ -77,7 +78,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \ dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \ - uvd_v3_1.o + uvd_v3_1.o vce_v1_0.o amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ @@ -324,4 +325,9 @@ amdgpu-y += \ isp_v4_1_1.o endif +AMD_GPU_RAS_PATH := ../ras +AMD_GPU_RAS_FULL_PATH := $(FULL_AMD_PATH)/ras +include $(AMD_GPU_RAS_FULL_PATH)/Makefile +amdgpu-y += $(AMD_GPU_RAS_FILES) + obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c index 9569dc16dd3d..daa7b23bc775 100644 --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -88,6 +88,10 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev) uint32_t ip_block; int r, i; + /* Skip suspend of SDMA IP versions >= 4.4.2. 
They are multi-aid */ + if (adev->aid_mask) + ip_block_mask &= ~BIT(AMD_IP_BLOCK_TYPE_SDMA); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6f5b4a0e0a34..9f9774f58ce1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -372,13 +372,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, u64 *flags); int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type); +bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type); int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block); int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block); -#define AMDGPU_MAX_IP_NUM 16 +#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM struct amdgpu_ip_block_status { bool valid; @@ -839,8 +841,6 @@ struct amd_powerplay { const struct amd_pm_funcs *pp_funcs; }; -struct ip_discovery_top; - /* polaris10 kickers */ #define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \ ((rid == 0xE3) || \ @@ -972,8 +972,7 @@ struct amdgpu_device { struct notifier_block acpi_nb; struct notifier_block pm_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; - struct debugfs_blob_wrapper debugfs_vbios_blob; - struct debugfs_blob_wrapper debugfs_discovery_blob; + struct debugfs_blob_wrapper debugfs_vbios_blob; struct mutex srbm_mutex; /* GRBM index mutex. Protects concurrent access to GRBM index */ struct mutex grbm_idx_mutex; @@ -1063,6 +1062,9 @@ struct amdgpu_device { u32 log2_max_MBps; } mm_stats; + /* discovery*/ + struct amdgpu_discovery_info discovery; + /* display */ bool enable_virtual_display; struct amdgpu_vkms_output *amdgpu_vkms_output; @@ -1174,6 +1176,12 @@ struct amdgpu_device { * queue fence. */ struct xarray userq_xa; + /** + * @userq_doorbell_xa: Global user queue map (doorbell index → queue) + * Key: doorbell_index (unique global identifier for the queue) + * Value: struct amdgpu_usermode_queue + */ + struct xarray userq_doorbell_xa; /* df */ struct amdgpu_df df; @@ -1265,8 +1273,6 @@ struct amdgpu_device { struct list_head ras_list; - struct ip_discovery_top *ip_top; - struct amdgpu_reset_domain *reset_domain; struct mutex benchmark_mutex; @@ -1309,9 +1315,8 @@ struct amdgpu_device { */ bool apu_prefer_gtt; - struct list_head userq_mgr_list; - struct mutex userq_mutex; bool userq_halt_for_enforce_isolation; + struct work_struct userq_reset_work; struct amdgpu_uid *uid_info; /* KFD @@ -1535,11 +1540,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) -#define amdgpu_asic_flush_hdp(adev, r) \ - ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r))) -#define amdgpu_asic_invalidate_hdp(adev, r) \ - ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \ - ((adev)->hdp.funcs->invalidate_hdp ? 
(adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0)) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) @@ -1638,7 +1638,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_release_kms(struct drm_device *dev); -int amdgpu_device_ip_suspend(struct amdgpu_device *adev); int amdgpu_device_prepare(struct drm_device *dev); void amdgpu_device_complete(struct drm_device *dev); int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 4926996f94da..381ef205b0df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -302,17 +302,19 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block) adev->acp.acp_res[2].end = adev->acp.acp_res[2].start; adev->acp.acp_cell[0].name = "acp_audio_dma"; + adev->acp.acp_cell[0].id = 0; adev->acp.acp_cell[0].num_resources = 3; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; adev->acp.acp_cell[0].platform_data = &adev->asic_type; adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); adev->acp.acp_cell[1].name = "designware-i2s"; + adev->acp.acp_cell[1].id = 1; adev->acp.acp_cell[1].num_resources = 1; adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); - r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2); + r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, 2, NULL, 0, NULL); if (r) goto failure; r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd, @@ -410,30 +412,34 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block) adev->acp.acp_res[4].end = adev->acp.acp_res[4].start; adev->acp.acp_cell[0].name = "acp_audio_dma"; + adev->acp.acp_cell[0].id = 0; adev->acp.acp_cell[0].num_resources = 5; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; adev->acp.acp_cell[0].platform_data = &adev->asic_type; adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); adev->acp.acp_cell[1].name = "designware-i2s"; + adev->acp.acp_cell[1].id = 1; adev->acp.acp_cell[1].num_resources = 1; adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); adev->acp.acp_cell[2].name = "designware-i2s"; + adev->acp.acp_cell[2].id = 2; adev->acp.acp_cell[2].num_resources = 1; adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2]; adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); adev->acp.acp_cell[3].name = "designware-i2s"; + adev->acp.acp_cell[3].id = 3; adev->acp.acp_cell[3].num_resources = 1; adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3]; adev->acp.acp_cell[3].platform_data = &i2s_pdata[2]; adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data); - r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); + r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, ACP_DEVS, NULL, 0, NULL); if (r) goto failure; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 
index 6c62e27b9800..d31460a9e958 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -507,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, pm_runtime_get_sync(adev_to_drm(adev)->dev); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(adev_to_drm(adev)); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 9e120c934cc1..8bdfcde2029b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -71,7 +71,7 @@ struct kgd_mem { struct mutex lock; struct amdgpu_bo *bo; struct dma_buf *dmabuf; - struct hmm_range *range; + struct amdgpu_hmm_range *range; struct list_head attachments; /* protected by amdkfd_process_info.lock */ struct list_head validate_list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index a2ca9acf8c4e..b1c24c8fa686 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1057,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, struct amdkfd_process_info *process_info = mem->process_info; struct amdgpu_bo *bo = mem->bo; struct ttm_operation_ctx ctx = { true, false }; - struct hmm_range *range; + struct amdgpu_hmm_range *range; int ret = 0; mutex_lock(&process_info->lock); @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, return 0; } - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); + range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!range)) { + ret = -ENOMEM; + goto unregister_out; + } + + ret = amdgpu_ttm_tt_get_user_pages(bo, range); if (ret) { + amdgpu_hmm_range_free(range); if (ret == -EAGAIN) pr_debug("Failed to get user pages, try again\n"); else @@ -1113,7 +1120,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, amdgpu_bo_unreserve(bo); release_out: - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); + amdgpu_hmm_range_free(range); unregister_out: if (ret) amdgpu_hmm_unregister(bo); @@ -1267,6 +1274,10 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem, (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va); + /* VM entity stopped if process killed, don't clear freed pt bo */ + if (!amdgpu_vm_ready(vm)) + return 0; + (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL); @@ -1916,7 +1927,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { amdgpu_hmm_unregister(mem->bo); mutex_lock(&process_info->notifier_lock); - amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); + amdgpu_hmm_range_free(mem->range); mutex_unlock(&process_info->notifier_lock); } @@ -1954,9 +1965,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( */ if (size) { if (!is_imported && - (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || - (adev->apu_prefer_gtt && - mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) *size = bo_size; else *size = 0; @@ -2542,7 +2551,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, bo = mem->bo; - amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); + amdgpu_hmm_range_free(mem->range); mem->range = NULL; /* 
BO reservations and getting user pages (hmm_range_fault) @@ -2566,9 +2575,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, } } + mem->range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!mem->range)) + return -ENOMEM; /* Get updated user pages */ - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); if (ret) { + amdgpu_hmm_range_free(mem->range); + mem->range = NULL; pr_debug("Failed %d to get user pages\n", ret); /* Return -EFAULT bad address error as success. It will @@ -2741,8 +2755,8 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i continue; /* Only check mem with hmm range associated */ - valid = amdgpu_ttm_tt_get_user_pages_done( - mem->bo->tbo.ttm, mem->range); + valid = amdgpu_hmm_range_valid(mem->range); + amdgpu_hmm_range_free(mem->range); mem->range = NULL; if (!valid) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index c7d32fb216e4..636385c80f64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -181,19 +181,22 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) u8 frev, crev; int usage_bytes = 0; - if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) { - if (frev == 2 && crev == 1) { - fw_usage_v2_1 = - (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); - amdgpu_atomfirmware_allocate_fb_v2_1(adev, - fw_usage_v2_1, - &usage_bytes); - } else if (frev >= 2 && crev >= 2) { - fw_usage_v2_2 = - (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset); - amdgpu_atomfirmware_allocate_fb_v2_2(adev, - fw_usage_v2_2, - &usage_bytes); + /* Skip atomfirmware allocation for SRIOV VFs when dynamic crit regn is enabled */ + if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) { + if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) { + if (frev == 2 && crev == 1) { + fw_usage_v2_1 = + (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); + amdgpu_atomfirmware_allocate_fb_v2_1(adev, + fw_usage_v2_1, + &usage_bytes); + } else if (frev >= 2 && crev >= 2) { + fw_usage_v2_2 = + (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset); + amdgpu_atomfirmware_allocate_fb_v2_2(adev, + fw_usage_v2_2, + &usage_bytes); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 00e96419fcda..35d04e69aec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -96,13 +96,14 @@ void amdgpu_bios_release(struct amdgpu_device *adev) * part of the system bios. On boot, the system bios puts a * copy of the igp rom at the start of vram if a discrete card is * present. - * For SR-IOV, the vbios image is also put in VRAM in the VF. + * For SR-IOV, if dynamic critical region is not enabled, + * the vbios image is also put at the start of VRAM in the VF. */ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev) { - uint8_t __iomem *bios; + uint8_t __iomem *bios = NULL; resource_size_t vram_base; - resource_size_t size = 256 * 1024; /* ??? */ + u32 size = 256U * 1024U; /* ??? 
*/ if (!(adev->flags & AMD_IS_APU)) if (amdgpu_device_need_post(adev)) @@ -114,18 +115,33 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev) adev->bios = NULL; vram_base = pci_resource_start(adev->pdev, 0); - bios = ioremap_wc(vram_base, size); - if (!bios) - return false; adev->bios = kmalloc(size, GFP_KERNEL); - if (!adev->bios) { - iounmap(bios); + if (!adev->bios) return false; + + /* For SRIOV with dynamic critical region is enabled, + * the vbios image is put at a dynamic offset of VRAM in the VF. + * If dynamic critical region is disabled, follow the existing logic as on baremetal. + */ + if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) { + if (amdgpu_virt_get_dynamic_data_info(adev, + AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) { + amdgpu_bios_release(adev); + return false; + } + } else { + bios = ioremap_wc(vram_base, size); + if (!bios) { + amdgpu_bios_release(adev); + return false; + } + + memcpy_fromio(adev->bios, bios, size); + iounmap(bios); } + adev->bios_size = size; - memcpy_fromio(adev->bios, bios, size); - iounmap(bios); if (!check_atom_bios(adev, size)) { amdgpu_bios_release(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index a716c9886c74..2b5e7c46a39d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -38,7 +38,7 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo *bo; struct amdgpu_bo_va *bo_va; uint32_t priority; - struct hmm_range *range; + struct amdgpu_hmm_range *range; bool user_invalidated; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 47e9bfba0642..9f96d568acf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -734,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -919,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1146,10 +1142,8 @@ out: amdgpu_connector_update_scratch_regs(connector, ret); exit: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1486,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector->connector_type == DRM_MODE_CONNECTOR_eDP) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2f6a96af7fb1..ecdfe6cb36cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -29,7 +29,6 
@@ #include <linux/pagemap.h> #include <linux/sync_file.h> #include <linux/dma-buf.h> -#include <linux/hmm.h> #include <drm/amdgpu_drm.h> #include <drm/drm_syncobj.h> @@ -41,6 +40,7 @@ #include "amdgpu_gmc.h" #include "amdgpu_gem.h" #include "amdgpu_ras.h" +#include "amdgpu_hmm.h" static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, struct amdgpu_device *adev, @@ -891,12 +891,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, bool userpage_invalidated = false; struct amdgpu_bo *bo = e->bo; - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); + e->range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!e->range)) + return -ENOMEM; + + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); if (r) goto out_free_user_pages; for (i = 0; i < bo->tbo.ttm->num_pages; i++) { - if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) { + if (bo->tbo.ttm->pages[i] != + hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) { userpage_invalidated = true; break; } @@ -990,9 +995,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, out_free_user_pages: amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct amdgpu_bo *bo = e->bo; - - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); + amdgpu_hmm_range_free(e->range); e->range = NULL; } mutex_unlock(&p->bo_list->bo_list_mutex); @@ -1323,8 +1326,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, */ r = 0; amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm, - e->range); + r |= !amdgpu_hmm_range_valid(e->range); + amdgpu_hmm_range_free(e->range); e->range = NULL; } if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index f5d5c45ddc0d..afedea02188d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -236,7 +236,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv, &num_scheds, &scheds); if (r) - goto cleanup_entity; + goto error_free_entity; } /* disable load balance if the hw engine retains context among dependent jobs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index a70651050acf..62d43b8cbe58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; @@ -179,7 +178,6 @@ end: if (pm_pg_lock) mutex_unlock(&adev->pm.mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); @@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off if (rd->id.use_grbm) { if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) || (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); mutex_unlock(&rd->lock); @@ -310,7 
+307,6 @@ end: mutex_unlock(&rd->lock); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); @@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (!x) { @@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) { @@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (!x) { @@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); while (size) { @@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ 
-1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1701,7 +1681,6 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) up_write(&adev->reset_domain->sem); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val) *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val) r = amdgpu_benchmark(adev, val); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; @@ -1902,7 +1878,7 @@ no_preempt: continue; } job = to_amdgpu_job(s_job); - if (preempted && (&job->hw_fence.base) == fence) + if (preempted && (&job->hw_fence->base) == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } @@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val) ret = -EINVAL; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; @@ -2123,10 +2098,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) debugfs_create_blob("amdgpu_vbios", 0444, root, &adev->debugfs_vbios_blob); - adev->debugfs_discovery_blob.data = adev->mman.discovery_bin; - adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size; - debugfs_create_blob("amdgpu_discovery", 0444, root, - &adev->debugfs_discovery_blob); + if (adev->discovery.debugfs_blob.size) + debugfs_create_blob("amdgpu_discovery", 0444, root, + &adev->discovery.debugfs_blob); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index 8a026bc9ea44..4e2fe6674db8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -217,8 +217,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n"); drm_printf(&p, "kernel: " UTS_RELEASE "\n"); drm_printf(&p, "module: " KBUILD_MODNAME "\n"); - drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, - coredump->reset_time.tv_nsec); + drm_printf(&p, "time: %ptSp\n", &coredump->reset_time); if (coredump->reset_task_info.task.pid) drm_printf(&p, "process_name: %s PID: %d\n", diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3d032c4e2dce..58c3ffe707d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -71,6 +71,7 @@ #include "amdgpu_xgmi.h" #include "amdgpu_ras.h" +#include "amdgpu_ras_mgr.h" #include "amdgpu_pmu.h" #include "amdgpu_fru_eeprom.h" #include "amdgpu_reset.h" @@ -179,6 +180,10 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = { BIT(AMD_IP_BLOCK_TYPE_PSP) }; +static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev); +static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev); +static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev); + static void amdgpu_device_load_switch_state(struct amdgpu_device *adev); static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev, @@ -1673,9 +1678,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); struct pci_bus *root; struct resource *res; + int max_size, r; unsigned int i; u16 cmd; - int r; if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) return 0; @@ -1721,30 +1726,28 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) return 0; /* Limit the BAR size to what is available */ - rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1, - rbar_size); + max_size = pci_rebar_get_max_size(adev->pdev, 0); + if (max_size < 0) + return 0; + rbar_size = min(max_size, rbar_size); /* Disable memory decoding while we change the BAR addresses and size */ pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); pci_write_config_word(adev->pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY); - /* Free the VRAM and doorbell BAR, we most likely need to move both. */ + /* Tear down doorbell as resizing will release BARs */ amdgpu_doorbell_fini(adev); - if (adev->asic_type >= CHIP_BONAIRE) - pci_release_resource(adev->pdev, 2); - pci_release_resource(adev->pdev, 0); - - r = pci_resize_resource(adev->pdev, 0, rbar_size); + r = pci_resize_resource(adev->pdev, 0, rbar_size, + (adev->asic_type >= CHIP_BONAIRE) ? 1 << 5 + : 1 << 2); if (r == -ENOSPC) dev_info(adev->dev, "Not enough PCI address space for a large BAR."); else if (r && r != -ENOTSUPP) dev_err(adev->dev, "Problem resizing BAR0 (%d).", r); - pci_assign_unassigned_bus_resources(adev->pdev->bus); - /* When the doorbell or fb BAR isn't available we have no chance of * using the device. */ @@ -2387,7 +2390,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, } /** - * amdgpu_device_ip_is_valid - is the hardware IP enabled + * amdgpu_device_ip_is_hw - is the hardware IP enabled * * @adev: amdgpu_device pointer * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) @@ -2395,6 +2398,27 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, * Check if the hardware IP is enable or not. * Returns true if it the IP is enable, false if not. */ +bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].version->type == block_type) + return adev->ip_blocks[i].status.hw; + } + return false; +} + +/** + * amdgpu_device_ip_is_valid - is the hardware IP valid + * + * @adev: amdgpu_device pointer + * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) + * + * Check if the hardware IP is valid or not. + * Returns true if it the IP is valid, false if not. 
+ */ bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type) { @@ -2473,6 +2497,7 @@ static const char *ip_block_names[] = { [AMD_IP_BLOCK_TYPE_VPE] = "vpe", [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm", [AMD_IP_BLOCK_TYPE_ISP] = "isp", + [AMD_IP_BLOCK_TYPE_RAS] = "ras", }; static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type) @@ -2633,11 +2658,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "arcturus"; break; case CHIP_NAVI12: - if (adev->mman.discovery_bin) + if (adev->discovery.bin) return 0; chip_name = "navi12"; break; case CHIP_CYAN_SKILLFISH: + if (adev->discovery.bin) + return 0; chip_name = "cyan_skillfish"; break; } @@ -2761,6 +2788,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) r = amdgpu_virt_request_full_gpu(adev, true); if (r) return r; + + r = amdgpu_virt_init_critical_region(adev); + if (r) + return r; } switch (adev->asic_type) { @@ -3414,10 +3445,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev, (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - /* skip CG for VCE/UVD, it's handled specially */ + /* skip CG for VCE/UVD/VPE, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && adev->ip_blocks[i].version->funcs->set_powergating_state) { /* enable powergating to save power */ @@ -3649,6 +3681,20 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) "failed to release exclusive mode on fini\n"); } + /* + * Driver reload on the APU can fail due to firmware validation because + * the PSP is always running, as it is shared across the whole SoC. + * This same issue does not occur on dGPU because it has a mechanism + * that checks whether the PSP is running. A solution for those issues + * in the APU is to trigger a GPU reset, but this should be done during + * the unload phase to avoid adding boot latency and screen flicker. 
+ */ + if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) { + r = amdgpu_asic_reset(adev); + if (r) + dev_err(adev->dev, "asic reset on %s failed\n", __func__); + } + return 0; } @@ -3759,7 +3805,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) */ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) { - int i, r; + int i, r, rec; amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); @@ -3780,13 +3826,25 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) continue; - /* XXX handle errors */ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); if (r) - return r; + goto unwind; } return 0; +unwind: + rec = amdgpu_device_ip_resume_phase3(adev); + if (rec) + dev_err(adev->dev, + "amdgpu_device_ip_resume_phase3 failed during unwind: %d\n", + rec); + + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW); + + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); + + return r; } /** @@ -3802,7 +3860,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) */ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) { - int i, r; + int i, r, rec; if (adev->in_s0ix) amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry); @@ -3863,9 +3921,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) continue; - /* XXX handle errors */ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); - adev->ip_blocks[i].status.hw = false; + if (r) + goto unwind; /* handle putting the SMC in the appropriate state */ if (!amdgpu_sriov_vf(adev)) { @@ -3875,13 +3933,40 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) dev_err(adev->dev, "SMC failed to set mp1 state %d, %d\n", adev->mp1_state, r); - return r; + goto unwind; } } } } return 0; +unwind: + /* suspend phase 2 = resume phase 1 + resume phase 2 */ + rec = amdgpu_device_ip_resume_phase1(adev); + if (rec) { + dev_err(adev->dev, + "amdgpu_device_ip_resume_phase1 failed during unwind: %d\n", + rec); + return r; + } + + rec = amdgpu_device_fw_loading(adev); + if (rec) { + dev_err(adev->dev, + "amdgpu_device_fw_loading failed during unwind: %d\n", + rec); + return r; + } + + rec = amdgpu_device_ip_resume_phase2(adev); + if (rec) { + dev_err(adev->dev, + "amdgpu_device_ip_resume_phase2 failed during unwind: %d\n", + rec); + return r; + } + + return r; } /** @@ -3895,7 +3980,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) * in each IP into a state suitable for suspend. * Returns 0 on success, negative error code on failure. */ -int amdgpu_device_ip_suspend(struct amdgpu_device *adev) +static int amdgpu_device_ip_suspend(struct amdgpu_device *adev) { int r; @@ -4179,25 +4264,13 @@ bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: - /* - * We have systems in the wild with these ASICs that require - * LVDS and VGA support which is not supported with DC. - * - * Fallback to the non-DC driver here by default so as not to - * cause regressions. 
- */ -#if defined(CONFIG_DRM_AMD_DC_SI) - return amdgpu_dc > 0; -#else - return false; -#endif - case CHIP_BONAIRE: + return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI); case CHIP_KAVERI: case CHIP_KABINI: case CHIP_MULLINS: /* * We have systems in the wild with these ASICs that require - * VGA support which is not supported with DC. + * TRAVIS and NUTMEG support which is not supported with DC. * * Fallback to the non-DC driver here by default so as not to * cause regressions. @@ -4285,58 +4358,53 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) long timeout; int ret = 0; - /* - * By default timeout for jobs is 10 sec - */ - adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000); - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + /* By default timeout for all queues is 2 sec */ + adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout = + adev->video_timeout = msecs_to_jiffies(2000); - if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { - while ((timeout_setting = strsep(&input, ",")) && - strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { - ret = kstrtol(timeout_setting, 0, &timeout); - if (ret) - return ret; + if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) + return 0; - if (timeout == 0) { - index++; - continue; - } else if (timeout < 0) { - timeout = MAX_SCHEDULE_TIMEOUT; - dev_warn(adev->dev, "lockup timeout disabled"); - add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); - } else { - timeout = msecs_to_jiffies(timeout); - } + while ((timeout_setting = strsep(&input, ",")) && + strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + ret = kstrtol(timeout_setting, 0, &timeout); + if (ret) + return ret; - switch (index++) { - case 0: - adev->gfx_timeout = timeout; - break; - case 1: - adev->compute_timeout = timeout; - break; - case 2: - adev->sdma_timeout = timeout; - break; - case 3: - adev->video_timeout = timeout; - break; - default: - break; - } + if (timeout == 0) { + index++; + continue; + } else if (timeout < 0) { + timeout = MAX_SCHEDULE_TIMEOUT; + dev_warn(adev->dev, "lockup timeout disabled"); + add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); + } else { + timeout = msecs_to_jiffies(timeout); } - /* - * There is only one value specified and - * it should apply to all non-compute jobs. - */ - if (index == 1) { - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) - adev->compute_timeout = adev->gfx_timeout; + + switch (index++) { + case 0: + adev->gfx_timeout = timeout; + break; + case 1: + adev->compute_timeout = timeout; + break; + case 2: + adev->sdma_timeout = timeout; + break; + case 3: + adev->video_timeout = timeout; + break; + default: + break; } } + /* When only one value specified apply it to all queues. 
*/ + if (index == 1) + adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout = + adev->video_timeout = timeout; + return ret; } @@ -4391,6 +4459,55 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) dev_info(adev->dev, "MCBP is enabled\n"); } +static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_atombios_sysfs_init(adev); + if (r) + drm_err(&adev->ddev, + "registering atombios sysfs failed (%d).\n", r); + + r = amdgpu_pm_sysfs_init(adev); + if (r) + dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); + + r = amdgpu_ucode_sysfs_init(adev); + if (r) { + adev->ucode_sysfs_en = false; + dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); + } else + adev->ucode_sysfs_en = true; + + r = amdgpu_device_attr_sysfs_init(adev); + if (r) + dev_err(adev->dev, "Could not create amdgpu device attr\n"); + + r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); + if (r) + dev_err(adev->dev, + "Could not create amdgpu board attributes\n"); + + amdgpu_fru_sysfs_init(adev); + amdgpu_reg_state_sysfs_init(adev); + amdgpu_xcp_sysfs_init(adev); + + return r; +} + +static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev) +{ + if (adev->pm.sysfs_initialized) + amdgpu_pm_sysfs_fini(adev); + if (adev->ucode_sysfs_en) + amdgpu_ucode_sysfs_fini(adev); + amdgpu_device_attr_sysfs_fini(adev); + amdgpu_fru_sysfs_fini(adev); + + amdgpu_reg_state_sysfs_fini(adev); + amdgpu_xcp_sysfs_fini(adev); +} + /** * amdgpu_device_init - initialize the driver * @@ -4490,7 +4607,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->gfx.userq_sch_mutex); mutex_init(&adev->gfx.workload_profile_mutex); mutex_init(&adev->vcn.workload_profile_mutex); - mutex_init(&adev->userq_mutex); amdgpu_device_init_apu_flags(adev); @@ -4518,7 +4634,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->pm.od_kobj_list); - INIT_LIST_HEAD(&adev->userq_mgr_list); + xa_init(&adev->userq_doorbell_xa); INIT_DELAYED_WORK(&adev->delayed_init_work, amdgpu_device_delayed_init_work_handler); @@ -4541,6 +4657,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, } INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); + INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work); adev->gfx.gfx_off_req_count = 1; adev->gfx.gfx_off_residency = 0; @@ -4814,39 +4931,14 @@ fence_driver_init: flush_delayed_work(&adev->delayed_init_work); } + if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) + amdgpu_xgmi_reset_on_init(adev); /* * Place those sysfs registering after `late_init`. As some of those * operations performed in `late_init` might affect the sysfs * interfaces creating. 
*/ - r = amdgpu_atombios_sysfs_init(adev); - if (r) - drm_err(&adev->ddev, - "registering atombios sysfs failed (%d).\n", r); - - r = amdgpu_pm_sysfs_init(adev); - if (r) - dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); - - r = amdgpu_ucode_sysfs_init(adev); - if (r) { - adev->ucode_sysfs_en = false; - dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); - } else - adev->ucode_sysfs_en = true; - - r = amdgpu_device_attr_sysfs_init(adev); - if (r) - dev_err(adev->dev, "Could not create amdgpu device attr\n"); - - r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); - if (r) - dev_err(adev->dev, - "Could not create amdgpu board attributes\n"); - - amdgpu_fru_sysfs_init(adev); - amdgpu_reg_state_sysfs_init(adev); - amdgpu_xcp_sysfs_init(adev); + r = amdgpu_device_sys_interface_init(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); @@ -4874,9 +4966,6 @@ fence_driver_init: if (px) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); - if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) - amdgpu_xgmi_reset_on_init(adev); - amdgpu_device_check_iommu_direct_map(adev); adev->pm_nb.notifier_call = amdgpu_device_pm_notifier; @@ -4968,15 +5057,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) } amdgpu_fence_driver_hw_fini(adev); - if (adev->pm.sysfs_initialized) - amdgpu_pm_sysfs_fini(adev); - if (adev->ucode_sysfs_en) - amdgpu_ucode_sysfs_fini(adev); - amdgpu_device_attr_sysfs_fini(adev); - amdgpu_fru_sysfs_fini(adev); - - amdgpu_reg_state_sysfs_fini(adev); - amdgpu_xcp_sysfs_fini(adev); + amdgpu_device_sys_interface_fini(adev); /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); @@ -5051,7 +5132,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); - if (adev->mman.discovery_bin) + if (adev->discovery.bin) amdgpu_discovery_fini(adev); amdgpu_reset_put_reset_domain(adev->reset_domain); @@ -5199,7 +5280,7 @@ void amdgpu_device_complete(struct drm_device *dev) int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) { struct amdgpu_device *adev = drm_to_adev(dev); - int r = 0; + int r, rec; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -5215,39 +5296,92 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) return r; } - if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3)) - dev_warn(adev->dev, "smart shift update failed\n"); + r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3); + if (r) + goto unwind_sriov; if (notify_clients) - drm_client_dev_suspend(adev_to_drm(adev), false); + drm_client_dev_suspend(adev_to_drm(adev)); cancel_delayed_work_sync(&adev->delayed_init_work); amdgpu_ras_suspend(adev); - amdgpu_device_ip_suspend_phase1(adev); + r = amdgpu_device_ip_suspend_phase1(adev); + if (r) + goto unwind_smartshift; amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); - amdgpu_userq_suspend(adev); + r = amdgpu_userq_suspend(adev); + if (r) + goto unwind_ip_phase1; r = amdgpu_device_evict_resources(adev); if (r) - return r; + goto unwind_userq; amdgpu_ttm_set_buffer_funcs_status(adev, false); amdgpu_fence_driver_hw_fini(adev); - amdgpu_device_ip_suspend_phase2(adev); + r = amdgpu_device_ip_suspend_phase2(adev); + if (r) + goto unwind_evict; if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, false); - r = amdgpu_dpm_notify_rlc_state(adev, false); - if (r) + return 0; + +unwind_evict: + if 
(adev->mman.buffer_funcs_ring->sched.ready) + amdgpu_ttm_set_buffer_funcs_status(adev, true); + amdgpu_fence_driver_hw_init(adev); + +unwind_userq: + rec = amdgpu_userq_resume(adev); + if (rec) { + dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec); return r; + } + rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); + if (rec) { + dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec); + return r; + } - return 0; +unwind_ip_phase1: + /* suspend phase 1 = resume phase 3 */ + rec = amdgpu_device_ip_resume_phase3(adev); + if (rec) { + dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec); + return r; + } + +unwind_smartshift: + rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0); + if (rec) { + dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec); + return r; + } + + if (notify_clients) + drm_client_dev_resume(adev_to_drm(adev)); + + amdgpu_ras_resume(adev); + +unwind_sriov: + if (amdgpu_sriov_vf(adev)) { + rec = amdgpu_virt_request_full_gpu(adev, true); + if (rec) { + dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec); + return r; + } + } + + adev->in_suspend = adev->in_s0ix = adev->in_s3 = false; + + return r; } static inline int amdgpu_virt_resume(struct amdgpu_device *adev) @@ -5353,7 +5487,7 @@ exit: flush_delayed_work(&adev->delayed_init_work); if (notify_clients) - drm_client_dev_resume(adev_to_drm(adev), false); + drm_client_dev_resume(adev_to_drm(adev)); amdgpu_ras_resume(adev); @@ -5809,11 +5943,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (!amdgpu_ring_sched_ready(ring)) continue; - /* Clear job fence from fence drv to avoid force_completion - * leave NULL and vm flush fence in fence drv - */ - amdgpu_fence_driver_clear_job_fences(ring); - /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); } @@ -5958,7 +6087,11 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) if (r) goto out; - drm_client_dev_resume(adev_to_drm(tmp_adev), false); + r = amdgpu_userq_post_reset(tmp_adev, vram_lost); + if (r) + goto out; + + drm_client_dev_resume(adev_to_drm(tmp_adev)); /* * The GPU enters bad state once faulty pages @@ -6180,6 +6313,7 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) cancel_work(&adev->reset_work); #endif + cancel_work(&adev->userq_reset_work); if (adev->kfd.dev) cancel_work(&adev->kfd.reset_work); @@ -6293,13 +6427,15 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - drm_client_dev_suspend(adev_to_drm(tmp_adev), false); + drm_client_dev_suspend(adev_to_drm(tmp_adev)); /* disable ras on ALL IPs */ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) && amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); + amdgpu_userq_pre_reset(tmp_adev); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; @@ -6529,6 +6665,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, goto end_reset; } + /* Cannot be called after locking reset domain */ + amdgpu_ras_pre_reset(adev, &device_list); + /* We need to lock reset domain only once both for XGMI and single device */ amdgpu_device_recovery_get_reset_lock(adev, &device_list); @@ -6542,7 +6681,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * * job->base holds a reference to parent fence */ - if (job && 
dma_fence_is_signaled(&job->hw_fence.base)) { + if (job && dma_fence_is_signaled(&job->hw_fence->base)) { job_signaled = true; dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; @@ -6559,6 +6698,7 @@ skip_sched_resume: amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart); reset_unlock: amdgpu_device_recovery_put_reset_lock(adev, &device_list); + amdgpu_ras_post_reset(adev, &device_list); end_reset: if (hive) { mutex_unlock(&hive->hive_lock); @@ -7286,10 +7426,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, if (adev->gmc.xgmi.connected_to_cpu) return; - if (ring && ring->funcs->emit_hdp_flush) + if (ring && ring->funcs->emit_hdp_flush) { amdgpu_ring_emit_hdp_flush(ring); - else - amdgpu_asic_flush_hdp(adev, ring); + return; + } + + if (!ring && amdgpu_sriov_runtime(adev)) { + if (!amdgpu_kiq_hdp_flush(adev)) + return; + } + + amdgpu_hdp_flush(adev, ring); } void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, @@ -7302,7 +7449,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, if (adev->gmc.xgmi.connected_to_cpu) return; - amdgpu_asic_invalidate_hdp(adev, ring); + amdgpu_hdp_invalidate(adev, ring); } int amdgpu_in_reset(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index dd7b2b796427..fa2a22dfa048 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -107,6 +107,7 @@ #include "vcn_v5_0_1.h" #include "jpeg_v5_0_0.h" #include "jpeg_v5_0_1.h" +#include "amdgpu_ras_mgr.h" #include "amdgpu_vpe.h" #if defined(CONFIG_DRM_AMD_ISP) @@ -254,9 +255,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET; /* This region is read-only and reserved from system use */ - discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC); + discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC); if (discv_regn) { - memcpy(binary, discv_regn, adev->mman.discovery_tmr_size); + memcpy(binary, discv_regn, adev->discovery.size); memunmap(discv_regn); return 0; } @@ -298,10 +299,31 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, else vram_size <<= 20; + /* + * If in VRAM, discovery TMR is marked for reservation. If it is in system mem, + * then it is not required to be reserved. + */ if (sz_valid) { - uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; - amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, - adev->mman.discovery_tmr_size, false); + if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) { + /* For SRIOV VFs with dynamic critical region enabled, + * we will get the IPD binary via below call. + * If dynamic critical is disabled, fall through to normal seq. 
+ */ + if (amdgpu_virt_get_dynamic_data_info(adev, + AMD_SRIOV_MSG_IPD_TABLE_ID, binary, + &adev->discovery.size)) { + dev_err(adev->dev, + "failed to read discovery info from dynamic critical region."); + ret = -EINVAL; + goto exit; + } + } else { + uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; + + amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, + adev->discovery.size, false); + adev->discovery.reserve_tmr = true; + } } else { ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary); } @@ -310,7 +332,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, dev_err(adev->dev, "failed to read discovery info from memory, vram size read: %llx", vram_size); - +exit: return ret; } @@ -389,6 +411,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, struct binary_header *bhdr) { + uint8_t *discovery_bin = adev->discovery.bin; struct table_info *info; uint16_t checksum; uint16_t offset; @@ -398,14 +421,14 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, checksum = le16_to_cpu(info->checksum); struct nps_info_header *nhdr = - (struct nps_info_header *)(adev->mman.discovery_bin + offset); + (struct nps_info_header *)(discovery_bin + offset); if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) { dev_dbg(adev->dev, "invalid ip discovery nps info table id\n"); return -EINVAL; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, le32_to_cpu(nhdr->size_bytes), checksum)) { dev_dbg(adev->dev, "invalid nps info data table checksum\n"); @@ -417,8 +440,11 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev) { - if (amdgpu_discovery == 2) + if (amdgpu_discovery == 2) { + /* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */ + adev->discovery.reserve_tmr = true; return "amdgpu/ip_discovery.bin"; + } switch (adev->asic_type) { case CHIP_VEGA10: @@ -447,49 +473,53 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; struct binary_header *bhdr; + uint8_t *discovery_bin; const char *fw_name; uint16_t offset; uint16_t size; uint16_t checksum; int r; - adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; - adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); - if (!adev->mman.discovery_bin) + adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); + if (!adev->discovery.bin) return -ENOMEM; + adev->discovery.size = DISCOVERY_TMR_SIZE; + adev->discovery.debugfs_blob.data = adev->discovery.bin; + adev->discovery.debugfs_blob.size = adev->discovery.size; + discovery_bin = adev->discovery.bin; /* Read from file if it is the preferred option */ fw_name = amdgpu_discovery_get_fw_name(adev); if (fw_name != NULL) { drm_dbg(&adev->ddev, "use ip discovery information from file"); - r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name); + r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin, + fw_name); if (r) goto out; } else { drm_dbg(&adev->ddev, "use ip discovery information from memory"); - r = amdgpu_discovery_read_binary_from_mem( - adev, adev->mman.discovery_bin); + r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin); if (r) goto out; } /* check the ip discovery binary signature */ - if 
(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) { dev_err(adev->dev, "get invalid ip discovery binary signature\n"); r = -EINVAL; goto out; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); size = le16_to_cpu(bhdr->binary_size) - offset; checksum = le16_to_cpu(bhdr->binary_checksum); - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - size, checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size, + checksum)) { dev_err(adev->dev, "invalid ip discovery binary checksum\n"); r = -EINVAL; goto out; @@ -501,15 +531,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct ip_discovery_header *ihdr = - (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); + (struct ip_discovery_header *)(discovery_bin + offset); if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { dev_err(adev->dev, "invalid ip discovery data table signature\n"); r = -EINVAL; goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le16_to_cpu(ihdr->size), checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, + le16_to_cpu(ihdr->size), + checksum)) { dev_err(adev->dev, "invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; @@ -522,7 +553,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct gpu_info_header *ghdr = - (struct gpu_info_header *)(adev->mman.discovery_bin + offset); + (struct gpu_info_header *)(discovery_bin + offset); if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery gc table id\n"); @@ -530,8 +561,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(ghdr->size), checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, + le32_to_cpu(ghdr->size), + checksum)) { dev_err(adev->dev, "invalid gc data table checksum\n"); r = -EINVAL; goto out; @@ -544,7 +576,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct harvest_info_header *hhdr = - (struct harvest_info_header *)(adev->mman.discovery_bin + offset); + (struct harvest_info_header *)(discovery_bin + offset); if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); @@ -552,8 +584,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - sizeof(struct harvest_table), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + sizeof(struct harvest_table), checksum)) { dev_err(adev->dev, "invalid harvest data table checksum\n"); r = -EINVAL; goto out; @@ -566,7 +599,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct vcn_info_header *vhdr = - (struct vcn_info_header *)(adev->mman.discovery_bin + offset); + (struct vcn_info_header *)(discovery_bin + offset); if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery vcn table id\n"); @@ -574,8 +607,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if 
(!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(vhdr->size_bytes), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + le32_to_cpu(vhdr->size_bytes), checksum)) { dev_err(adev->dev, "invalid vcn data table checksum\n"); r = -EINVAL; goto out; @@ -588,7 +622,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (0 && offset) { struct mall_info_header *mhdr = - (struct mall_info_header *)(adev->mman.discovery_bin + offset); + (struct mall_info_header *)(discovery_bin + offset); if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery mall table id\n"); @@ -596,8 +630,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(mhdr->size_bytes), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + le32_to_cpu(mhdr->size_bytes), checksum)) { dev_err(adev->dev, "invalid mall data table checksum\n"); r = -EINVAL; goto out; @@ -607,8 +642,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) return 0; out: - kfree(adev->mman.discovery_bin); - adev->mman.discovery_bin = NULL; + kfree(adev->discovery.bin); + adev->discovery.bin = NULL; if ((amdgpu_discovery != 2) && (RREG32(mmIP_DISCOVERY_VERSION) == 4)) amdgpu_ras_query_boot_status(adev, 4); @@ -620,8 +655,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); void amdgpu_discovery_fini(struct amdgpu_device *adev) { amdgpu_discovery_sysfs_fini(adev); - kfree(adev->mman.discovery_bin); - adev->mman.discovery_bin = NULL; + kfree(adev->discovery.bin); + adev->discovery.bin = NULL; } static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev, @@ -646,6 +681,7 @@ static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev, static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, uint32_t *vcn_harvest_count) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; @@ -655,21 +691,21 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, uint8_t inst; int i, j; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); /* scan harvest bit of all IP data structures */ for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->mman.discovery_bin + - ip_offset); + ip = (struct ip *)(discovery_bin + ip_offset); inst = ip->number_instance; hw_id = le16_to_cpu(ip->hw_id); if (amdgpu_discovery_validate_ip(adev, inst, hw_id)) @@ -711,13 +747,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, uint32_t *vcn_harvest_count, uint32_t *umc_harvest_count) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct harvest_table *harvest_info; u16 offset; int i; uint32_t 
umc_harvest_config = 0; - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); if (!offset) { @@ -725,7 +762,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, return; } - harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset); + harvest_info = (struct harvest_table *)(discovery_bin + offset); for (i = 0; i < 32; i++) { if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) @@ -1021,8 +1058,8 @@ static void ip_disc_release(struct kobject *kobj) kobj); struct amdgpu_device *adev = ip_top->adev; - adev->ip_top = NULL; kfree(ip_top); + adev->discovery.ip_top = NULL; } static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev, @@ -1062,6 +1099,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, const size_t _ip_offset, const int num_ips, bool reg_base_64) { + uint8_t *discovery_bin = adev->discovery.bin; int ii, jj, kk, res; uint16_t hw_id; uint8_t inst; @@ -1079,7 +1117,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, struct ip_v4 *ip; struct ip_hw_instance *ip_hw_instance; - ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(discovery_bin + ip_offset); inst = ip->instance_number; hw_id = le16_to_cpu(ip->hw_id); if (amdgpu_discovery_validate_ip(adev, inst, hw_id) || @@ -1166,17 +1204,20 @@ next_ip: static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) { + struct ip_discovery_top *ip_top = adev->discovery.ip_top; + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; - struct kset *die_kset = &adev->ip_top->die_kset; + struct kset *die_kset = &ip_top->die_kset; u16 num_dies, die_offset, num_ips; size_t ip_offset; int ii, res; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); DRM_DEBUG("number of dies: %d\n", num_dies); @@ -1185,7 +1226,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) struct ip_die_entry *ip_die_entry; die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); @@ -1219,30 +1260,32 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; + struct ip_discovery_top *ip_top; struct kset *die_kset; int res, ii; - if (!adev->mman.discovery_bin) + if (!discovery_bin) return -EINVAL; - adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL); - if (!adev->ip_top) + ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL); + if (!ip_top) return -ENOMEM; - adev->ip_top->adev = adev; - - res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype, + ip_top->adev = adev; + adev->discovery.ip_top = ip_top; + res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype, &adev->dev->kobj, "ip_discovery"); if (res) { DRM_ERROR("Couldn't init and add 
ip_discovery/"); goto Err; } - die_kset = &adev->ip_top->die_kset; + die_kset = &ip_top->die_kset; kobject_set_name(&die_kset->kobj, "%s", "die"); - die_kset->kobj.parent = &adev->ip_top->kobj; + die_kset->kobj.parent = &ip_top->kobj; die_kset->kobj.ktype = &die_kobj_ktype; - res = kset_register(&adev->ip_top->die_kset); + res = kset_register(&ip_top->die_kset); if (res) { DRM_ERROR("Couldn't register die_kset"); goto Err; @@ -1256,7 +1299,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) return res; Err: - kobject_put(&adev->ip_top->kobj); + kobject_put(&ip_top->kobj); return res; } @@ -1301,10 +1344,11 @@ static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry) static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) { + struct ip_discovery_top *ip_top = adev->discovery.ip_top; struct list_head *el, *tmp; struct kset *die_kset; - die_kset = &adev->ip_top->die_kset; + die_kset = &ip_top->die_kset; spin_lock(&die_kset->list_lock); list_for_each_prev_safe(el, tmp, &die_kset->list) { list_del_init(el); @@ -1313,8 +1357,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) spin_lock(&die_kset->list_lock); } spin_unlock(&die_kset->list_lock); - kobject_put(&adev->ip_top->die_kset.kobj); - kobject_put(&adev->ip_top->kobj); + kobject_put(&ip_top->die_kset.kobj); + kobject_put(&ip_top->kobj); } /* ================================================== */ @@ -1325,6 +1369,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; + uint8_t *discovery_bin; struct ip_v4 *ip; uint16_t die_offset; uint16_t ip_offset; @@ -1340,22 +1385,23 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) r = amdgpu_discovery_init(adev); if (r) return r; - + discovery_bin = adev->discovery.bin; wafl_ver = 0; adev->gfx.xcc_mask = 0; adev->sdma.sdma_mask = 0; adev->vcn.inst_mask = 0; adev->jpeg.inst_mask = 0; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); DRM_DEBUG("number of dies: %d\n", num_dies); for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); @@ -1369,7 +1415,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) le16_to_cpu(dhdr->die_id), num_ips); for (j = 0; j < num_ips; j++) { - ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(discovery_bin + ip_offset); inst = ip->instance_number; hw_id = le16_to_cpu(ip->hw_id); @@ -1519,16 +1565,16 @@ next_ip: static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct ip_discovery_header *ihdr; struct binary_header *bhdr; int vcn_harvest_count = 0; int umc_harvest_count = 0; uint16_t offset, ihdr_ver; - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset); - ihdr = (struct 
ip_discovery_header *)(adev->mman.discovery_bin + - offset); + ihdr = (struct ip_discovery_header *)(discovery_bin + offset); ihdr_ver = le16_to_cpu(ihdr->version); /* * Harvest table does not fit Navi1x and legacy GPUs, @@ -1575,22 +1621,23 @@ union gc_info { static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; union gc_info *gc_info; u16 offset; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[GC].offset); if (!offset) return 0; - gc_info = (union gc_info *)(adev->mman.discovery_bin + offset); + gc_info = (union gc_info *)(discovery_bin + offset); switch (le16_to_cpu(gc_info->v1.header.version_major)) { case 1: @@ -1683,24 +1730,25 @@ union mall_info { static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; union mall_info *mall_info; u32 u, mall_size_per_umc, m_s_present, half_use; u64 mall_size; u16 offset; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset); if (!offset) return 0; - mall_info = (union mall_info *)(adev->mman.discovery_bin + offset); + mall_info = (union mall_info *)(discovery_bin + offset); switch (le16_to_cpu(mall_info->v1.header.version_major)) { case 1: @@ -1739,12 +1787,13 @@ union vcn_info { static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; union vcn_info *vcn_info; u16 offset; int v; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } @@ -1759,13 +1808,13 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); if (!offset) return 0; - vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset); + vcn_info = (union vcn_info *)(discovery_bin + offset); switch (le16_to_cpu(vcn_info->v1.header.version_major)) { case 1: @@ -1825,6 +1874,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, struct amdgpu_gmc_memrange **ranges, int *range_cnt, bool refresh) { + uint8_t *discovery_bin = adev->discovery.bin; struct amdgpu_gmc_memrange *mem_ranges; struct binary_header *bhdr; union nps_info *nps_info; @@ -1841,13 +1891,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, return r; nps_info = &nps_data; } else { - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { dev_err(adev->dev, "fetch mem range failed, ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); if (!offset) @@ -1857,8 +1907,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) return -ENOENT; - nps_info = - (union nps_info *)(adev->mman.discovery_bin + offset); + nps_info = 
(union nps_info *)(discovery_bin + offset); } switch (le16_to_cpu(nps_info->v1.header.version_major)) { @@ -2361,6 +2410,21 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) amdgpu_ip_version(adev, SDMA0_HWIP, 0)); return -EINVAL; } + + return 0; +} + +static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev) +{ + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 12): + case IP_VERSION(13, 0, 14): + amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block); + break; + default: + break; + } return 0; } @@ -3141,6 +3205,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) if (r) return r; + r = amdgpu_discovery_set_ras_ip_blocks(adev); + if (r) + return r; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && !amdgpu_sriov_vf(adev)) || (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index b44d56465c5b..4ce04486cc31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,9 +24,21 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ +#include <linux/debugfs.h> + #define DISCOVERY_TMR_SIZE (10 << 10) #define DISCOVERY_TMR_OFFSET (64 << 10) +struct ip_discovery_top; + +struct amdgpu_discovery_info { + struct debugfs_blob_wrapper debugfs_blob; + struct ip_discovery_top *ip_top; + uint32_t size; + uint8_t *bin; + bool reserve_tmr; +}; + void amdgpu_discovery_fini(struct amdgpu_device *adev); int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 51bab32fd8c6..b5d34797d606 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, if (crtc->enabled) active = true; - pm_runtime_mark_last_busy(dev->dev); - adev = drm_to_adev(dev); /* if we have active crtcs and we don't have a power ref, * take the current one @@ -1365,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = { { AMDGPU_FMT_DITHER_ENABLE, "on" }, }; +/** + * DOC: property for adaptive backlight modulation + * + * The 'adaptive backlight modulation' property is used for the compositor to + * directly control the adaptive backlight modulation power savings feature + * that is part of DCN hardware. + * + * The property will be attached specifically to eDP panels that support it. + * + * The property is by default set to 'sysfs' to allow the sysfs file 'panel_power_savings' + * to be able to control it. + * If set to 'off' the compositor will ensure it stays off. + * The other values 'min', 'bias min', 'bias max', and 'max' will control the + * intensity of the power savings. + * + * Modifying this value can have implications on color accuracy, so tread + * carefully. 
+ */ +static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev) +{ + const struct drm_prop_enum_list props[] = { + { ABM_SYSFS_CONTROL, "sysfs" }, + { ABM_LEVEL_OFF, "off" }, + { ABM_LEVEL_MIN, "min" }, + { ABM_LEVEL_BIAS_MIN, "bias min" }, + { ABM_LEVEL_BIAS_MAX, "bias max" }, + { ABM_LEVEL_MAX, "max" }, + }; + struct drm_property *prop; + int i; + + if (!adev->dc_enabled) + return 0; + + prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM, + "adaptive backlight modulation", + 6); + if (!prop) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + ret = drm_property_add_enum(prop, props[i].type, + props[i].name); + + if (ret) { + drm_property_destroy(adev_to_drm(adev), prop); + + return ret; + } + } + + adev->mode_info.abm_level_property = prop; + + return 0; +} + int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) { int sz; @@ -1411,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) "dither", amdgpu_dither_enum_list, sz); - return 0; + return amdgpu_display_setup_abm_prop(adev); } void amdgpu_display_update_priority(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index 930c171473b4..49a29bf47a37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -55,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev); int amdgpu_display_get_scanout_buffer(struct drm_plane *plane, struct drm_scanout_buffer *sb); +#define ABM_SYSFS_CONTROL -1 +#define ABM_LEVEL_OFF 0 +#define ABM_LEVEL_MIN 1 +#define ABM_LEVEL_BIAS_MIN 2 +#define ABM_LEVEL_BIAS_MAX 3 +#define ABM_LEVEL_MAX 4 + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 8561ad7f6180..e22cfa7c6d32 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -81,13 +81,44 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + int r; + + /* + * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+. + * Such buffers cannot be safely accessed over P2P due to device-local + * compression metadata. Fallback to system-memory path instead. + * Device supports GFX12 (GC 12.x or newer) + * BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag + * + */ + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) && + bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC) + attach->peer2peer = false; +
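The "adaptive backlight modulation" property documented above is an ordinary KMS enum property attached to eDP connectors, which means a compositor can program it through the generic DRM property API instead of the panel_power_savings sysfs file. The fragment below is only an illustrative userspace sketch, not code from this series: it assumes a libdrm device fd and the eDP connector id are already known, and that the caller has mapped the desired level ("off", "min", "bias min", "bias max", "max" or "sysfs") to its enum value from the property's value list.

/* Illustrative only: set the "adaptive backlight modulation" enum on an
 * eDP connector through the legacy property setter in libdrm.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_abm_property(int fd, uint32_t connector_id, uint64_t value)
{
	drmModeObjectProperties *props;
	int ret = -1;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return -1;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (!prop)
			continue;
		if (!strcmp(prop->name, "adaptive backlight modulation"))
			ret = drmModeObjectSetProperty(fd, connector_id,
						       DRM_MODE_OBJECT_CONNECTOR,
						       prop->prop_id, value);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return ret;
}

An atomic compositor would more likely cache the property id in its connector state and apply it with drmModeAtomicAddProperty() during a commit; the lookup-by-name loop here only keeps the sketch self-contained.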
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) && pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) attach->peer2peer = false; + r = dma_resv_lock(bo->tbo.base.resv, NULL); + if (r) + return r; + amdgpu_vm_bo_update_shared(bo); + dma_resv_unlock(bo->tbo.base.resv); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 61268aa82df4..2dfbddcef9ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -312,7 +312,7 @@ module_param_named(moverate, amdgpu_moverate, int, 0600); * DOC: audio (int) * Set HDMI/DPAudio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disabled it. */ -MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)"); +MODULE_PARM_DESC(audio, "HDMI/DP Audio enable for non DC displays (-1 = auto, 0 = disable, 1 = enable)"); module_param_named(audio, amdgpu_audio, int, 0444); /** @@ -354,22 +354,16 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint * DOC: lockup_timeout (string) * Set GPU scheduler timeout value in ms. * - * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or - * multiple values specified. 0 and negative values are invalidated. They will be adjusted - * to the default timeout. + * The format can be [single value] for setting all timeouts at once or + * [GFX,Compute,SDMA,Video] to set individual timeouts. + * Negative values mean infinity. * - * - With one value specified, the setting will apply to all non-compute jobs. - * - With multiple values specified, the first one will be for GFX. - * The second one is for Compute. The third and fourth ones are - * for SDMA and Video. - * - * By default(with no lockup_timeout settings), the timeout for all jobs is 10000. + * By default(with no lockup_timeout settings), the timeout for all queues is 2000. */ MODULE_PARM_DESC(lockup_timeout, - "GPU lockup timeout in ms (default: 10000 for all jobs. " - "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " - "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); -module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); + "GPU lockup timeout in ms (default: 2000. 0: keep default value. negative: infinity timeout), format: [single value for all] or [GFX,Compute,SDMA,Video]."); +module_param_string(lockup_timeout, amdgpu_lockup_timeout, + sizeof(amdgpu_lockup_timeout), 0444); /** * DOC: dpm (int) @@ -624,39 +618,39 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644); /** * DOC: si_support (int) - * Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled, - * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available, - * otherwise using amdgpu driver. - */ + * 1 = enabled, 0 = disabled, -1 = default + + * SI (Southern Islands) are first generation GCN GPUs, supported by both + * drivers: radeon (old) and amdgpu (new). This parameter controls whether + * amdgpu should support SI. 
+ * By default, SI dedicated GPUs are supported by amdgpu. + * Only relevant when CONFIG_DRM_AMDGPU_SI is enabled to build SI support in amdgpu. + * See also radeon.si_support which should be disabled when amdgpu.si_support is + * enabled, and vice versa. + */ +int amdgpu_si_support = -1; #ifdef CONFIG_DRM_AMDGPU_SI - -#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) -int amdgpu_si_support; -MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))"); -#else -int amdgpu_si_support = 1; -MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)"); -#endif - +MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)"); module_param_named(si_support, amdgpu_si_support, int, 0444); #endif /** * DOC: cik_support (int) - * Set CIK support driver. This parameter works after set config CONFIG_DRM_AMDGPU_CIK. For CIK asic, when radeon driver is enabled, - * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available, - * otherwise using amdgpu driver. - */ + * 1 = enabled, 0 = disabled, -1 = default + * + * CIK (Sea Islands) are second generation GCN GPUs, supported by both + * drivers: radeon (old) and amdgpu (new). This parameter controls whether + * amdgpu should support CIK. + * By default: + * - CIK dedicated GPUs are supported by amdgpu. + * - CIK APUs are supported by radeon (except when radeon is not built). + * Only relevant when CONFIG_DRM_AMDGPU_CIK is enabled to build CIK support in amdgpu. + * See also radeon.cik_support which should be disabled when amdgpu.cik_support is + * enabled, and vice versa. + */ +int amdgpu_cik_support = -1; #ifdef CONFIG_DRM_AMDGPU_CIK - -#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) -int amdgpu_cik_support; -MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))"); -#else -int amdgpu_cik_support = 1; -MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)"); -#endif - +MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)"); module_param_named(cik_support, amdgpu_cik_support, int, 0444); #endif @@ -2234,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) adev->pdev->bus->number, i); if (p) { pm_runtime_get_sync(&p->dev); - pm_runtime_mark_last_busy(&p->dev); pm_runtime_put_autosuspend(&p->dev); pci_dev_put(p); } @@ -2313,6 +2306,72 @@ static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long fl return flags; } +static bool amdgpu_support_enabled(struct device *dev, + const enum amd_asic_type family) +{ + const char *gen; + const char *param; + int module_param = -1; + bool radeon_support_built = IS_ENABLED(CONFIG_DRM_RADEON); + bool amdgpu_support_built = false; + bool support_by_default = false; + + switch (family) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + gen = "SI"; + param = "si_support"; + module_param = amdgpu_si_support; + amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_SI); + support_by_default = true; + break; + + case CHIP_BONAIRE: + case CHIP_HAWAII: + support_by_default = true; + fallthrough; + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + gen = "CIK"; + param = "cik_support"; + module_param = amdgpu_cik_support; + amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_CIK); + break; + + default: + /* All other chips are supported by amdgpu only */ + return 
true; + } + + if (!amdgpu_support_built) { + dev_info(dev, "amdgpu built without %s support\n", gen); + return false; + } + + if ((module_param == -1 && (support_by_default || !radeon_support_built)) || + module_param == 1) { + if (radeon_support_built) + dev_info(dev, "%s support provided by amdgpu.\n" + "Use radeon.%s=1 amdgpu.%s=0 to override.\n", + gen, param, param); + + return true; + } + + if (radeon_support_built) + dev_info(dev, "%s support provided by radeon.\n" + "Use radeon.%s=0 amdgpu.%s=1 to override.\n", + gen, param, param); + else if (module_param == 0) + dev_info(dev, "%s support disabled by module param\n", gen); + + return false; +} + static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2360,48 +2419,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENOTSUPP; } - switch (flags & AMD_ASIC_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: -#ifdef CONFIG_DRM_AMDGPU_SI - if (!amdgpu_si_support) { - dev_info(&pdev->dev, - "SI support provided by radeon.\n"); - dev_info(&pdev->dev, - "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n" - ); - return -ENODEV; - } - break; -#else - dev_info(&pdev->dev, "amdgpu is built without SI support.\n"); + if (!amdgpu_support_enabled(&pdev->dev, flags & AMD_ASIC_MASK)) return -ENODEV; -#endif - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: -#ifdef CONFIG_DRM_AMDGPU_CIK - if (!amdgpu_cik_support) { - dev_info(&pdev->dev, - "CIK support provided by radeon.\n"); - dev_info(&pdev->dev, - "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n" - ); - return -ENODEV; - } - break; -#else - dev_info(&pdev->dev, "amdgpu is built without CIK support.\n"); - return -ENODEV; -#endif - default: - break; - } adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); if (IS_ERR(adev)) @@ -2480,7 +2499,6 @@ retry_init: pm_runtime_allow(ddev->dev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); pci_wake_from_d3(pdev, TRUE); @@ -2564,7 +2582,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) */ if (!amdgpu_passthrough(adev)) adev->mp1_state = PP_MP1_STATE_UNLOAD; - amdgpu_device_ip_suspend(adev); + amdgpu_device_prepare(dev); + amdgpu_device_suspend(dev, true); adev->mp1_state = PP_MP1_STATE_NONE; } @@ -2632,9 +2651,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - if (amdgpu_acpi_should_gpu_reset(adev)) - return amdgpu_asic_reset(adev); + if (amdgpu_acpi_should_gpu_reset(adev)) { + amdgpu_device_lock_reset_domain(adev->reset_domain); + r = amdgpu_asic_reset(adev); + amdgpu_device_unlock_reset_domain(adev->reset_domain); + return r; + } return 0; } @@ -2777,22 +2801,8 @@ static int amdgpu_runtime_idle_check_userq(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; - int ret = 0; - - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - ret = -EBUSY; - goto done; - } - } -done: - mutex_unlock(&adev->userq_mutex); - return ret; + return xa_empty(&adev->userq_doorbell_xa) ? 
0 : -EBUSY; } static int amdgpu_pmops_runtime_suspend(struct device *dev) @@ -2939,7 +2949,6 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) ret = amdgpu_runtime_idle_check_userq(dev); done: - pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); return ret; } @@ -2975,7 +2984,6 @@ long amdgpu_drm_ioctl(struct file *filp, ret = drm_ioctl(filp, cmd, arg); - pm_runtime_mark_last_busy(dev->dev); out: pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 18a7829122d2..c7843e336310 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -45,16 +45,11 @@ * Cast helper */ static const struct dma_fence_ops amdgpu_fence_ops; -static const struct dma_fence_ops amdgpu_job_fence_ops; static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f) { struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); - if (__f->base.ops == &amdgpu_fence_ops || - __f->base.ops == &amdgpu_job_fence_ops) - return __f; - - return NULL; + return __f; } /** @@ -98,51 +93,32 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * amdgpu_fence_emit - emit a fence on the requested ring * * @ring: ring the fence is associated with - * @f: resulting fence object * @af: amdgpu fence input * @flags: flags to pass into the subordinate .emit_fence() call * * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. */ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, - struct amdgpu_fence *af, unsigned int flags) +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af, + unsigned int flags) { struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; - struct amdgpu_fence *am_fence; struct dma_fence __rcu **ptr; uint32_t seq; int r; - if (!af) { - /* create a separate hw fence */ - am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL); - if (!am_fence) - return -ENOMEM; - } else { - am_fence = af; - } - fence = &am_fence->base; - am_fence->ring = ring; + fence = &af->base; + af->ring = ring; seq = ++ring->fence_drv.sync_seq; - am_fence->seq = seq; - if (af) { - dma_fence_init(fence, &amdgpu_job_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - /* Against remove in amdgpu_job_{free, free_cb} */ - dma_fence_get(fence); - } else { - dma_fence_init(fence, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - } + dma_fence_init(fence, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); - amdgpu_fence_save_wptr(fence); + amdgpu_fence_save_wptr(af); pm_runtime_get_noresume(adev_to_drm(adev)->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { @@ -167,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, */ rcu_assign_pointer(*ptr, dma_fence_get(fence)); - *f = fence; - return 0; } @@ -276,7 +250,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) drv->signalled_wptr = am_fence->wptr; dma_fence_signal(fence); dma_fence_put(fence); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } while (last_seq != seq); @@ -670,36 +643,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) } /** - * 
amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring - * - * @ring: fence of the ring to be cleared - * - */ -void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) -{ - int i; - struct dma_fence *old, **ptr; - - for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) { - ptr = &ring->fence_drv.fences[i]; - old = rcu_dereference_protected(*ptr, 1); - if (old && old->ops == &amdgpu_job_fence_ops) { - struct amdgpu_job *job; - - /* For non-scheduler bad job, i.e. failed ib test, we need to signal - * it right here or we won't be able to track them in fence_drv - * and they will remain unsignaled during sa_bo free. - */ - job = container_of(old, struct amdgpu_job, hw_fence.base); - if (!job->base.s_fence && !dma_fence_is_signaled(old)) - dma_fence_signal(old); - RCU_INIT_POINTER(*ptr, NULL); - dma_fence_put(old); - } - } -} - -/** * amdgpu_fence_driver_set_error - set error code on fences * @ring: the ring which contains the fences * @error: the error code to set @@ -755,7 +698,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) /** * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence * - * @fence: fence of the ring to signal + * @af: fence of the ring to signal * */ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af) @@ -792,15 +735,13 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af) } while (last_seq != seq); spin_unlock_irqrestore(&ring->fence_drv.lock, flags); /* signal the guilty fence */ - amdgpu_fence_write(ring, af->seq); + amdgpu_fence_write(ring, (u32)af->base.seqno); amdgpu_fence_process(ring); } -void amdgpu_fence_save_wptr(struct dma_fence *fence) +void amdgpu_fence_save_wptr(struct amdgpu_fence *af) { - struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base); - - am_fence->wptr = am_fence->ring->wptr; + af->wptr = af->ring->wptr; } static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, @@ -866,13 +807,6 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) return (const char *)to_amdgpu_fence(f)->ring->name; } -static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f) -{ - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); - - return (const char *)to_amdgpu_ring(job->base.sched)->name; -} - /** * amdgpu_fence_enable_signaling - enable signalling on fence * @f: fence @@ -890,23 +824,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) } /** - * amdgpu_job_fence_enable_signaling - enable signalling on job fence - * @f: fence - * - * This is the simliar function with amdgpu_fence_enable_signaling above, it - * only handles the job embedded fence. - */ -static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f) -{ - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); - - if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer)) - amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched)); - - return true; -} - -/** * amdgpu_fence_free - free up the fence memory * * @rcu: RCU callback head @@ -922,21 +839,6 @@ static void amdgpu_fence_free(struct rcu_head *rcu) } /** - * amdgpu_job_fence_free - free up the job with embedded fence - * - * @rcu: RCU callback head - * - * Free up the job with embedded fence after the RCU grace period. 
- */ -static void amdgpu_job_fence_free(struct rcu_head *rcu) -{ - struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - - /* free job if fence has a parent job */ - kfree(container_of(f, struct amdgpu_job, hw_fence.base)); -} - -/** * amdgpu_fence_release - callback that fence can be freed * * @f: fence @@ -949,19 +851,6 @@ static void amdgpu_fence_release(struct dma_fence *f) call_rcu(&f->rcu, amdgpu_fence_free); } -/** - * amdgpu_job_fence_release - callback that job embedded fence can be freed - * - * @f: fence - * - * This is the simliar function with amdgpu_fence_release above, it - * only handles the job embedded fence. - */ -static void amdgpu_job_fence_release(struct dma_fence *f) -{ - call_rcu(&f->rcu, amdgpu_job_fence_free); -} - static const struct dma_fence_ops amdgpu_fence_ops = { .get_driver_name = amdgpu_fence_get_driver_name, .get_timeline_name = amdgpu_fence_get_timeline_name, @@ -969,13 +858,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = { .release = amdgpu_fence_release, }; -static const struct dma_fence_ops amdgpu_job_fence_ops = { - .get_driver_name = amdgpu_fence_get_driver_name, - .get_timeline_name = amdgpu_job_fence_get_timeline_name, - .enable_signaling = amdgpu_job_fence_enable_signaling, - .release = amdgpu_job_fence_release, -}; - /* * Fence debugfs */ @@ -1045,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val) *val = atomic_read(&adev->reset_domain->reset_res); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index b2033f8352f5..d2237ce9da70 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -302,7 +302,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages) { unsigned t; - unsigned p; int i, j; u64 page_base; /* Starting from VEGA10, system bit must be 0 to mean invalid. */ @@ -316,8 +315,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, return; t = offset / AMDGPU_GPU_PAGE_SIZE; - p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; - for (i = 0; i < pages; i++, p++) { + for (i = 0; i < pages; i++) { page_base = adev->dummy_page_addr; if (!adev->gart.ptr) continue; @@ -370,6 +368,42 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, } /** + * amdgpu_gart_map_vram_range - map VRAM pages into the GART page table + * + * @adev: amdgpu_device pointer + * @pa: physical address of the first page to be mapped + * @start_page: first page to map in the GART aperture + * @num_pages: number of pages to be mapped + * @flags: page table entry flags + * @dst: CPU address of the GART table + * + * Binds a BO that is allocated in VRAM to the GART page table + * (all ASICs). + * + * Useful when a kernel BO is located in VRAM but + * needs to be accessed from the GART address space. + */ +void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa, + uint64_t start_page, uint64_t num_pages, + uint64_t flags, void *dst) +{ + u32 i, idx; + + /* The SYSTEM flag indicates the pages aren't in VRAM. 
*/ + WARN_ON_ONCE(flags & AMDGPU_PTE_SYSTEM); + + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return; + + for (i = 0; i < num_pages; ++i) { + amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr, + start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags); + } + + drm_dev_exit(idx); +} + +/** * amdgpu_gart_bind - bind pages into the gart page table * * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index 7cc980bf4725..d3118275ddae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -64,5 +64,8 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, void *dst); void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags); +void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa, + uint64_t start_page, uint64_t num_pages, + uint64_t flags, void *dst); void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index b7ebae289bea..3e38c5db2987 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj) struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj); amdgpu_hmm_unregister(aobj); - ttm_bo_put(&aobj->tbo); + ttm_bo_fini(&aobj->tbo); } int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, @@ -531,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_amdgpu_gem_userptr *args = data; struct amdgpu_fpriv *fpriv = filp->driver_priv; struct drm_gem_object *gobj; - struct hmm_range *range; + struct amdgpu_hmm_range *range; struct amdgpu_bo *bo; uint32_t handle; int r; @@ -572,10 +572,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo, &range); - if (r) + range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!range)) + return -ENOMEM; + r = amdgpu_ttm_tt_get_user_pages(bo, range); + if (r) { + amdgpu_hmm_range_free(range); goto release_object; - + } r = amdgpu_bo_reserve(bo, true); if (r) goto user_pages_done; @@ -597,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, user_pages_done: if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); - + amdgpu_hmm_range_free(range); release_object: drm_gem_object_put(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index ebe2b4c68b0f..8b118c53f351 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -33,6 +33,7 @@ #include "amdgpu_reset.h" #include "amdgpu_xcp.h" #include "amdgpu_xgmi.h" +#include "amdgpu_mes.h" #include "nvd.h" /* delay 0.1 second to enable gfx off feature */ @@ -1194,6 +1195,75 @@ failed_kiq_write: dev_err(adev->dev, "failed to write reg:%x\n", reg); } +int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *ring = &kiq->ring; + + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready) + return amdgpu_mes_hdp_flush(adev); + + if (!ring->funcs->emit_hdp_flush) { + return 
-EOPNOTSUPP; + } + + spin_lock_irqsave(&kiq->ring_lock, flags); + r = amdgpu_ring_alloc(ring, 32); + if (r) + goto failed_unlock; + + amdgpu_ring_emit_hdp_flush(ring); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) + goto failed_undo; + + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) + goto failed_kiq_hdp_flush; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + if (amdgpu_in_reset(adev)) + goto failed_kiq_hdp_flush; + + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) { + dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n"); + return -ETIMEDOUT; + } + + return 0; + +failed_undo: + amdgpu_ring_undo(ring); +failed_unlock: + spin_unlock_irqrestore(&kiq->ring_lock, flags); +failed_kiq_hdp_flush: + dev_err(adev->dev, "failed to flush HDP via KIQ\n"); + return r < 0 ? r : -EIO; +} + int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev) { if (amdgpu_num_kcq == -1) { @@ -1600,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev, ret = amdgpu_gfx_run_cleaner_shader(adev, value); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (ret) @@ -2485,3 +2554,4 @@ void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev) &amdgpu_debugfs_compute_sched_mask_fops); #endif } + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index fb5f7a0ee029..efd61a1ccc66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -615,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id); void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id); +int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 9dcf51991b5b..869bceb0fe2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -597,6 +597,9 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) /* reserve engine 5 for firmware */ if (adev->enable_mes) vm_inv_engs[i] &= ~(1 << 5); + /* reserve engine 6 for uni mes */ + if (adev->enable_uni_mes) + vm_inv_engs[i] &= ~(1 << 6); /* reserve mmhub engine 3 for firmware */ if (adev->enable_umsch_mm) vm_inv_engs[i] &= ~(1 << 3); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 55097ca10738..727342689d4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -86,6 +86,11 @@ enum amdgpu_memory_partition { #define AMDGPU_MAX_MEM_RANGES 8 +#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY 0x80 +#define 
AMDGPU_GMC9_FAULT_SOURCE_DATA_READ 0x40 +#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20 +#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10 + /* * GMC page fault information */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 0760e70402ec..895c1e4c6747 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -284,6 +284,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size); start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; + start += amdgpu_vce_required_gart_pages(adev); size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c index 6e02fb9ac2f6..5a60d69a3e1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -66,3 +66,19 @@ void amdgpu_hdp_generic_flush(struct amdgpu_device *adev, 0); } } + +void amdgpu_hdp_invalidate(struct amdgpu_device *adev, struct amdgpu_ring *ring) +{ + if (adev->asic_funcs && adev->asic_funcs->invalidate_hdp) + adev->asic_funcs->invalidate_hdp(adev, ring); + else if (adev->hdp.funcs && adev->hdp.funcs->invalidate_hdp) + adev->hdp.funcs->invalidate_hdp(adev, ring); +} + +void amdgpu_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring) +{ + if (adev->asic_funcs && adev->asic_funcs->flush_hdp) + adev->asic_funcs->flush_hdp(adev, ring); + else if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp) + adev->hdp.funcs->flush_hdp(adev, ring); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index 4cfd932b7e91..d9f488fa76b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -46,4 +46,8 @@ struct amdgpu_hdp { int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev); void amdgpu_hdp_generic_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring); +void amdgpu_hdp_invalidate(struct amdgpu_device *adev, + struct amdgpu_ring *ring); +void amdgpu_hdp_flush(struct amdgpu_device *adev, + struct amdgpu_ring *ring); #endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index 2c6a6b858112..90d26d820bac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -168,17 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, - struct hmm_range **phmm_range) + struct amdgpu_hmm_range *range) { - struct hmm_range *hmm_range; unsigned long end; unsigned long timeout; unsigned long *pfns; int r = 0; - - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); - if (unlikely(!hmm_range)) - return -ENOMEM; + struct hmm_range *hmm_range = &range->hmm_range; pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); if (unlikely(!pfns)) { @@ -221,28 +217,77 @@ retry: hmm_range->start = start; hmm_range->hmm_pfns = pfns; - *phmm_range = hmm_range; - return 0; out_free_pfns: kvfree(pfns); + hmm_range->hmm_pfns = NULL; out_free_range: - kfree(hmm_range); - if (r == -EBUSY) r = -EAGAIN; return r; } -bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) +/** + * amdgpu_hmm_range_valid - check if an HMM range is still valid + * 
@range: pointer to the &struct amdgpu_hmm_range to validate + * + * Determines whether the given HMM range @range is still valid by + * checking for invalidations via the MMU notifier sequence. This is + * typically used to verify that the range has not been invalidated + * by concurrent address space updates before it is accessed. + * + * Return: + * * true if @range is valid and can be used safely + * * false if @range is NULL or has been invalidated + */ +bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range) { - bool r; + if (!range) + return false; - r = mmu_interval_read_retry(hmm_range->notifier, - hmm_range->notifier_seq); - kvfree(hmm_range->hmm_pfns); - kfree(hmm_range); + return !mmu_interval_read_retry(range->hmm_range.notifier, + range->hmm_range.notifier_seq); +} - return r; +/** + * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range + * @bo: optional buffer object to associate with this HMM range + * + * Allocates memory for amdgpu_hmm_range and associates it with the @bo passed. + * The reference count of the @bo is incremented. + * + * Return: + * Pointer to a newly allocated struct amdgpu_hmm_range on success, + * or NULL if memory allocation fails. + */ +struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo) +{ + struct amdgpu_hmm_range *range; + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (!range) + return NULL; + + range->bo = amdgpu_bo_ref(bo); + return range; +} + +/** + * amdgpu_hmm_range_free - release an AMDGPU HMM range + * @range: pointer to the range object to free + * + * Releases all resources held by @range, including the associated + * hmm_pfns and the dropping reference of associated bo if any. + * + * Return: void + */ +void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) +{ + if (!range) + return; + + kvfree(range->hmm_range.hmm_pfns); + amdgpu_bo_unref(&range->bo); + kfree(range); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h index 953e1d06de20..140bc9cd57b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h @@ -31,13 +31,20 @@ #include <linux/interval_tree.h> #include <linux/mmu_notifier.h> +struct amdgpu_hmm_range { + struct hmm_range hmm_range; + struct amdgpu_bo *bo; +}; + int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, - struct hmm_range **phmm_range); -bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); + struct amdgpu_hmm_range *range); #if defined(CONFIG_HMM_MIRROR) +bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range); +struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo); +void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range); int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_hmm_unregister(struct amdgpu_bo *bo); #else @@ -47,7 +54,20 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr) "add CONFIG_ZONE_DEVICE=y in config file to fix this\n"); return -ENODEV; } + static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {} + +static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range) +{ + return false; +} + +static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo) +{ + return NULL; +} + +static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {} #endif #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 
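Reviewer note: the amdgpu_hmm rework above replaces the alloc-inside-get_pages pattern with an explicit amdgpu_hmm_range object lifecycle. A hedged caller-side sketch, using only the prototypes introduced by this patch (the wrapper function itself and its error handling are illustrative, not part of the patch):

/* Illustrative lifecycle for the reworked HMM range API above:
 * alloc -> get_pages -> consume pfns -> validate -> free.
 */
static int hmm_range_usage_sketch(struct amdgpu_bo *bo,
				  struct mmu_interval_notifier *notifier,
				  uint64_t start, uint64_t npages, void *owner)
{
	struct amdgpu_hmm_range *range;
	int r;

	range = amdgpu_hmm_range_alloc(bo);	/* takes a reference on @bo */
	if (!range)
		return -ENOMEM;

	r = amdgpu_hmm_range_get_pages(notifier, start, npages,
				       false /* readonly */, owner, range);
	if (r)
		goto out_free;

	/* ... consume range->hmm_range.hmm_pfns ... */

	/* bail out and retry if the range was invalidated concurrently */
	if (!amdgpu_hmm_range_valid(range))
		r = -EAGAIN;

out_free:
	amdgpu_hmm_range_free(range);	/* drops the pfns and the bo reference */
	return r;
}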
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 7d9bcb72e8dd..586a58facca1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -149,17 +149,19 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, if (job) { vm = job->vm; fence_ctx = job->base.s_fence ? - job->base.s_fence->scheduled.context : 0; + job->base.s_fence->finished.context : 0; shadow_va = job->shadow_va; csa_va = job->csa_va; gds_va = job->gds_va; init_shadow = job->init_shadow; - af = &job->hw_fence; + af = job->hw_fence; /* Save the context of the job for reset handling. * The driver needs this so it can skip the ring * contents for guilty contexts. */ - af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0; + af->context = fence_ctx; + /* the vm fence is also part of the job's context */ + job->hw_vm_fence->context = fence_ctx; } else { vm = NULL; fence_ctx = 0; @@ -167,23 +169,28 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, csa_va = 0; gds_va = 0; init_shadow = false; - af = NULL; + af = kzalloc(sizeof(*af), GFP_ATOMIC); + if (!af) + return -ENOMEM; } if (!ring->sched.ready) { dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); - return -EINVAL; + r = -EINVAL; + goto free_fence; } if (vm && !job->vmid) { dev_err(adev->dev, "VM IB without ID\n"); - return -EINVAL; + r = -EINVAL; + goto free_fence; } if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) && (!ring->funcs->secure_submission_supported)) { dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name); - return -EINVAL; + r = -EINVAL; + goto free_fence; } alloc_size = ring->funcs->emit_frame_size + num_ibs * @@ -192,7 +199,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, r = amdgpu_ring_alloc(ring, alloc_size); if (r) { dev_err(adev->dev, "scheduling IB failed (%d).\n", r); - return r; + goto free_fence; } need_ctx_switch = ring->current_ctx != fence_ctx; @@ -289,7 +296,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr); } - r = amdgpu_fence_emit(ring, f, af, fence_flags); + r = amdgpu_fence_emit(ring, af, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) @@ -297,6 +304,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_undo(ring); return r; } + *f = &af->base; + /* get a ref for the job */ + if (job) + dma_fence_get(*f); if (ring->funcs->insert_end) ring->funcs->insert_end(ring); @@ -317,12 +328,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, * fence so we know what rings contents to backup * after we reset the queue. 
*/ - amdgpu_fence_save_wptr(*f); + amdgpu_fence_save_wptr(af); amdgpu_ring_ib_end(ring); amdgpu_ring_commit(ring); return 0; + +free_fence: + if (!job) + kfree(af); + return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 3ef5bc95642c..9cab36322c16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct dma_fence **fences; - unsigned i; + /* If anybody is waiting for a VMID let everybody wait for fairness */ if (!dma_fence_is_signaled(ring->vmid_wait)) { *fence = dma_fence_get(ring->vmid_wait); return 0; } - fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT); - if (!fences) - return -ENOMEM; - /* Check if we have an idle VMID */ - i = 0; - list_for_each_entry((*idle), &id_mgr->ids_lru, list) { + list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) { /* Don't use per engine and per process VMID at the same time */ struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ? NULL : ring; - fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); - if (!fences[i]) - break; - ++i; + *fence = amdgpu_sync_peek_fence(&(*idle)->active, r); + if (!(*fence)) + return 0; } - /* If we can't find a idle VMID to use, wait till one becomes available */ - if (&(*idle)->list == &id_mgr->ids_lru) { - u64 fence_context = adev->vm_manager.fence_context + ring->idx; - unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; - struct dma_fence_array *array; - unsigned j; - - *idle = NULL; - for (j = 0; j < i; ++j) - dma_fence_get(fences[j]); - - array = dma_fence_array_create(i, fences, fence_context, - seqno, true); - if (!array) { - for (j = 0; j < i; ++j) - dma_fence_put(fences[j]); - kfree(fences); - return -ENOMEM; - } - - *fence = dma_fence_get(&array->base); - dma_fence_put(ring->vmid_wait); - ring->vmid_wait = &array->base; - return 0; - } - kfree(fences); + /* + * If we can't find a idle VMID to use, wait on a fence from the least + * recently used in the hope that it will be available soon. + */ + *idle = NULL; + dma_fence_put(ring->vmid_wait); + ring->vmid_wait = dma_fence_get(*fence); + /* This is the reference we return */ + dma_fence_get(*fence); return 0; } @@ -313,7 +289,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, * user of the VMID. 
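Reviewer note: the amdgpu_vmid_grab_idle() rewrite above drops the dma_fence_array of all idle VMIDs and instead waits on a single fence from the least recently used ID. Note the two dma_fence_get() calls: one reference is cached in ring->vmid_wait, the other is handed back to the caller. A minimal illustration of that reference discipline (helper name hypothetical, not from the patch):

/* Hypothetical illustration: caching a fence and also returning it to the
 * caller requires two independent references, dropped separately later.
 */
static struct dma_fence *cache_and_return(struct dma_fence **cache,
					  struct dma_fence *fence)
{
	dma_fence_put(*cache);			/* drop the previously cached fence */
	*cache = dma_fence_get(fence);		/* reference held by the cache      */
	return dma_fence_get(fence);		/* reference owned by the caller    */
}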
*/ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished, - GFP_NOWAIT); + GFP_ATOMIC); if (r) return r; @@ -373,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, */ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished, - GFP_NOWAIT); + GFP_ATOMIC); if (r) return r; @@ -426,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Remember this submission as user of the VMID */ r = amdgpu_sync_fence(&id->active, &job->base.s_fence->finished, - GFP_NOWAIT); + GFP_ATOMIC); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c index 9cddbf50442a..37270c4dab8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c @@ -280,6 +280,8 @@ int isp_kernel_buffer_alloc(struct device *dev, u64 size, if (ret) return ret; + /* Ensure *bo is NULL so a new BO will be created */ + *bo = NULL; ret = amdgpu_bo_create_kernel(adev, size, ISP_MC_ADDR_ALIGN, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index d020a890a0ea..0a0dcbf0798d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -130,14 +130,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) } /* attempt a per ring reset */ - if (unlikely(adev->debug_disable_gpu_ring_reset)) { - dev_err(adev->dev, "Ring reset disabled by debug mask\n"); - } else if (amdgpu_gpu_recovery && - amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) && - ring->funcs->reset) { + if (amdgpu_gpu_recovery && + amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) && + ring->funcs->reset) { dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); - r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence); + r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence); if (!r) { atomic_inc(&ring->adev->gpu_reset_counter); dev_err(adev->dev, "Ring %s reset succeeded\n", @@ -186,6 +184,9 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int num_ibs, struct amdgpu_job **job, u64 drm_client_id) { + struct amdgpu_fence *af; + int r; + if (num_ibs == 0) return -EINVAL; @@ -193,6 +194,20 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!*job) return -ENOMEM; + af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + if (!af) { + r = -ENOMEM; + goto err_job; + } + (*job)->hw_fence = af; + + af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + if (!af) { + r = -ENOMEM; + goto err_fence; + } + (*job)->hw_vm_fence = af; + (*job)->vm = vm; amdgpu_sync_create(&(*job)->explicit_sync); @@ -204,6 +219,14 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, return drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id); + +err_fence: + kfree((*job)->hw_fence); +err_job: + kfree(*job); + *job = NULL; + + return r; } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, @@ -223,7 +246,10 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, if (r) { if (entity) drm_sched_job_cleanup(&(*job)->base); + kfree((*job)->hw_vm_fence); + kfree((*job)->hw_fence); kfree(*job); + *job = NULL; } return r; @@ -251,11 +277,11 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) struct dma_fence *f; unsigned i; - /* Check if any fences where initialized */ + /* Check if any fences were initialized */ if (job->base.s_fence && job->base.s_fence->finished.ops) f = 
&job->base.s_fence->finished; - else if (job->hw_fence.base.ops) - f = &job->hw_fence.base; + else if (job->hw_fence && job->hw_fence->base.ops) + f = &job->hw_fence->base; else f = NULL; @@ -271,11 +297,16 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) amdgpu_sync_free(&job->explicit_sync); - /* only put the hw fence if has embedded fence */ - if (!job->hw_fence.base.ops) - kfree(job); + if (job->hw_fence->base.ops) + dma_fence_put(&job->hw_fence->base); + else + kfree(job->hw_fence); + if (job->hw_vm_fence->base.ops) + dma_fence_put(&job->hw_vm_fence->base); else - dma_fence_put(&job->hw_fence.base); + kfree(job->hw_vm_fence); + + kfree(job); } void amdgpu_job_set_gang_leader(struct amdgpu_job *job, @@ -304,10 +335,16 @@ void amdgpu_job_free(struct amdgpu_job *job) if (job->gang_submit != &job->base.s_fence->scheduled) dma_fence_put(job->gang_submit); - if (!job->hw_fence.base.ops) - kfree(job); + if (job->hw_fence->base.ops) + dma_fence_put(&job->hw_fence->base); + else + kfree(job->hw_fence); + if (job->hw_vm_fence->base.ops) + dma_fence_put(&job->hw_vm_fence->base); else - dma_fence_put(&job->hw_fence.base); + kfree(job->hw_vm_fence); + + kfree(job); } struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 4a6487eb6cb5..7abf069d17d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -64,7 +64,8 @@ struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; struct amdgpu_sync explicit_sync; - struct amdgpu_fence hw_fence; + struct amdgpu_fence *hw_fence; + struct amdgpu_fence *hw_vm_fence; struct dma_fence *gang_submit; uint32_t preamble_status; uint32_t preemption_status; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index b3e6b3fcdf2c..6ee77f431d56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1471,7 +1471,6 @@ error_pasid: kfree(fpriv); out_suspend: - pm_runtime_mark_last_busy(dev->dev); pm_put: pm_runtime_put_autosuspend(dev->dev); @@ -1539,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, kfree(fpriv); file_priv->driver_priv = NULL; - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 4883adcfbb4b..9c182ce501af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -105,8 +105,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev) spin_lock_init(&adev->mes.ring_lock[i]); adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK; - adev->mes.vmid_mask_mmhub = 0xffffff00; - adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00; + adev->mes.vmid_mask_mmhub = 0xFF00; + adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 
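Reviewer note: the amdgpu_job changes above turn the embedded hw_fence into two separately allocated amdgpu_fence objects (hw_fence and hw_vm_fence), and both free paths apply the same rule. A hedged helper-style restatement of that rule (the helper itself is illustrative and not part of the patch):

/* Illustrative only: how the patch above disposes of a job's hardware fence.
 * If the fence was ever initialized/emitted (base.ops set), drop the
 * dma_fence reference; otherwise it is still a bare allocation and is kfree'd.
 */
static void put_job_hw_fence(struct amdgpu_fence *af)
{
	if (af->base.ops)
		dma_fence_put(&af->base);
	else
		kfree(af);
}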
0xFFFE : 0xFF00; num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me; if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES) @@ -528,6 +528,18 @@ error: return r; } +int amdgpu_mes_hdp_flush(struct amdgpu_device *adev) +{ + uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask; + + hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev); + hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev); + ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0; + + return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset, + ref_and_mask, ref_and_mask); +} + int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 97c137c90f97..e989225b354b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -239,6 +239,7 @@ struct mes_add_queue_input { struct mes_remove_queue_input { uint32_t doorbell_offset; uint64_t gang_context_addr; + bool remove_queue_after_reset; }; struct mes_map_legacy_queue_input { @@ -428,6 +429,7 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev, int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); +int amdgpu_mes_hdp_flush(struct amdgpu_device *adev); int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 20460cfd09bc..dc8d2f52c7d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -326,6 +326,8 @@ struct amdgpu_mode_info { struct drm_property *audio_property; /* FMT dithering */ struct drm_property *dither_property; + /* Adaptive Backlight Modulation (power feature) */ + struct drm_property *abm_level_property; /* hardcoded DFP edid from BIOS */ const struct drm_edid *bios_hardcoded_edid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 656b8a931dae..52c2d1731aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -96,6 +96,7 @@ struct amdgpu_bo_va { * if non-zero, cannot unmap from GPU because user queues may still access it */ unsigned int queue_refcount; + atomic_t userq_va_mapped; }; struct amdgpu_bo { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8c0e5d03de50..0b10497d487c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1539,6 +1539,7 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp, uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; uint64_t dst_node_id = node_info.node_id; uint8_t dst_num_hops = node_info.num_hops; + uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled; uint8_t dst_num_links = node_info.num_links; hive = amdgpu_get_xgmi_hive(psp->adev); @@ -1558,13 +1559,20 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp, continue; mirror_top_info->nodes[j].num_hops = dst_num_hops; - /* - * prevent 0 num_links value re-reflection since reflection + mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled; + /* prevent 0 num_links value re-reflection since reflection * criteria is based on num_hops (direct or 
indirect). - * */ - if (dst_num_links) + if (dst_num_links) { mirror_top_info->nodes[j].num_links = dst_num_links; + /* swap src and dst due to frame of reference */ + for (int k = 0; k < dst_num_links; k++) { + mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num = + node_info.port_num[k].dst_xgmi_port_num; + mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num = + node_info.port_num[k].src_xgmi_port_num; + } + } break; } @@ -1639,9 +1647,10 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == - IP_VERSION(13, 0, 14); - bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : - psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; + IP_VERSION(13, 0, 14) || + amdgpu_sriov_vf(psp->adev); + bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG || + amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev); /* popluate the shared output buffer rather than the cmd input buffer * with node_ids as the input for GET_PEER_LINKS command execution. @@ -2355,8 +2364,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp) if (!ret && !psp->securedisplay_context.context.resp_status) { psp->securedisplay_context.context.initialized = true; mutex_init(&psp->securedisplay_context.mutex); - } else + } else { + /* don't try again */ + psp->securedisplay_context.context.bin_desc.size_bytes = 0; return ret; + } mutex_lock(&psp->securedisplay_context.mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c index 123bcf5c2bb1..bacf888735db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c @@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf, } amdgpu_gfx_off_ctrl(adev, true); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index e0ee21150860..2a6cf7963dde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -41,6 +41,7 @@ #include "atom.h" #include "amdgpu_reset.h" #include "amdgpu_psp.h" +#include "amdgpu_ras_mgr.h" #ifdef CONFIG_X86_MCE_AMD #include <asm/mce.h> @@ -149,6 +150,8 @@ static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev); #ifdef CONFIG_X86_MCE_AMD static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); +static void +amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev); struct mce_notifier_adev_list { struct amdgpu_device *devs[MAX_GPU_INSTANCE]; int num_gpu; @@ -611,6 +614,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, return size; } +static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev); + /** * DOC: AMDGPU RAS debugfs EEPROM table reset interface * @@ -635,6 +640,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, (struct amdgpu_device *)file_inode(f)->i_private; int ret; + if (amdgpu_uniras_enabled(adev)) { + ret = amdgpu_uniras_clear_badpages_info(adev); + return ret ? 
ret : size; + } + ret = amdgpu_ras_eeprom_reset_table( &(amdgpu_ras_get_context(adev)->eeprom_control)); @@ -1542,9 +1552,51 @@ out_fini_err_data: return ret; } +static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev) +{ + struct ras_cmd_dev_handle req = {0}; + int ret; + + ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO, + &req, sizeof(req), NULL, 0); + if (ret) { + dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret); + return ret; + } + + return 0; +} + +static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev, + struct ras_query_if *info) +{ + struct ras_cmd_block_ecc_info_req req = {0}; + struct ras_cmd_block_ecc_info_rsp rsp = {0}; + int ret; + + if (!info) + return -EINVAL; + + req.block_id = info->head.block; + req.subblock_id = info->head.sub_block_index; + + ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS, + &req, sizeof(req), &rsp, sizeof(rsp)); + if (!ret) { + info->ce_count = rsp.ce_count; + info->ue_count = rsp.ue_count; + info->de_count = rsp.de_count; + } + + return ret; +} + int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) { - return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_uniras_query_block_ecc(adev, info); + else + return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID); } int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, @@ -1596,6 +1648,27 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, return 0; } +static int amdgpu_uniras_error_inject(struct amdgpu_device *adev, + struct ras_inject_if *info) +{ + struct ras_cmd_inject_error_req inject_req; + struct ras_cmd_inject_error_rsp rsp; + + if (!info) + return -EINVAL; + + memset(&inject_req, 0, sizeof(inject_req)); + inject_req.block_id = info->head.block; + inject_req.subblock_id = info->head.sub_block_index; + inject_req.address = info->address; + inject_req.error_type = info->head.type; + inject_req.instance_mask = info->instance_mask; + inject_req.method = info->value; + + return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR, + &inject_req, sizeof(inject_req), &rsp, sizeof(rsp)); +} + /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -1613,6 +1686,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, info->head.block, info->head.sub_block_index); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_uniras_error_inject(adev, info); + /* inject on guest isn't allowed, return success directly */ if (amdgpu_sriov_vf(adev)) return 0; @@ -1757,7 +1833,9 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev, /* sysfs begin */ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, - struct ras_badpage **bps, unsigned int *count); + struct ras_badpage *bps, uint32_t count, uint32_t start); +static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev, + struct ras_badpage *bps, uint32_t count, uint32_t start); static char *amdgpu_ras_badpage_flags_str(unsigned int flags) { @@ -1815,19 +1893,50 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, unsigned int end = div64_ul(ppos + count - 1, element_size); ssize_t s = 0; struct ras_badpage *bps = NULL; - unsigned int bps_count = 0; + int bps_count = 0, i, status; + uint64_t address; memset(buf, 0, count); - if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) + bps_count = end - 
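Reviewer note: the uniras helpers above all funnel through the same request/response call into the RAS manager. The shape of that call, shown with the block-ECC command from the hunk (the wrapper function and the example block value are illustrative, not part of the patch), is:

/* Illustrative restatement of the uniras command pattern used above:
 * zero a request struct, fill the command-specific fields, then hand
 * request and response buffers with their sizes to the RAS manager.
 */
static int uniras_cmd_pattern_sketch(struct amdgpu_device *adev)
{
	struct ras_cmd_block_ecc_info_req req = {0};
	struct ras_cmd_block_ecc_info_rsp rsp = {0};

	req.block_id = AMDGPU_RAS_BLOCK__UMC;	/* example block only */
	req.subblock_id = 0;

	return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
					     &req, sizeof(req),
					     &rsp, sizeof(rsp));
}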
start; + bps = kmalloc_array(bps_count, sizeof(*bps), GFP_KERNEL); + if (!bps) + return 0; + + memset(bps, 0, sizeof(*bps) * bps_count); + + if (amdgpu_uniras_enabled(adev)) + bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start); + else + bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start); + + if (bps_count <= 0) { + kfree(bps); return 0; + } + + for (i = 0; i < bps_count; i++) { + address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT; + if (amdgpu_ras_check_critical_address(adev, address)) + continue; + + bps[i].size = AMDGPU_GPU_PAGE_SIZE; + + status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, + address); + if (status == -EBUSY) + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; + else if (status == -ENOENT) + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; + else + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED; - for (; start < end && start < bps_count; start++) s += scnprintf(&buf[s], element_size + 1, "0x%08x : 0x%08x : %1s\n", - bps[start].bp, - bps[start].size, - amdgpu_ras_badpage_flags_str(bps[start].flags)); + bps[i].bp, + bps[i].size, + amdgpu_ras_badpage_flags_str(bps[i].flags)); + } kfree(bps); @@ -1843,12 +1952,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); } +static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major, + u32 *minor, u32 *rev) +{ + int i; + + if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev)) + return false; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) { + *major = adev->ip_blocks[i].version->major; + *minor = adev->ip_blocks[i].version->minor; + *rev = adev->ip_blocks[i].version->rev; + return true; + } + } + + return false; +} + static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct amdgpu_ras *con = container_of(attr, struct amdgpu_ras, version_attr); - return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); + u32 major, minor, rev; + ssize_t size = 0; + + size += sysfs_emit_at(buf, size, "table version: 0x%x\n", + con->eeprom_control.tbl_hdr.version); + + if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev)) + size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n", + major, minor, rev); + + return size; } static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev, @@ -2241,6 +2380,11 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY)) return; + if (amdgpu_uniras_enabled(adev)) { + amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL); + return; + } + if (adev->nbio.ras && adev->nbio.ras->handle_ras_controller_intr_no_bifring) adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); @@ -2411,6 +2555,16 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct ras_manager *obj; struct ras_ih_data *data; + if (amdgpu_uniras_enabled(adev)) { + struct ras_ih_info ih_info; + + memset(&ih_info, 0, sizeof(ih_info)); + ih_info.block = info->head.block; + memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry)); + + return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info); + } + obj = amdgpu_ras_find_obj(adev, &info->head); if (!obj) return -EINVAL; @@ -2605,62 +2759,83 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) } } -/* recovery begin */ - -/* return 0 on success. 
- * caller need free bps. - */ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, - struct ras_badpage **bps, unsigned int *count) + struct ras_badpage *bps, uint32_t count, uint32_t start) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; - int i = 0; - int ret = 0, status; + int r = 0; + uint32_t i; if (!con || !con->eh_data || !bps || !count) return -EINVAL; mutex_lock(&con->recovery_lock); data = con->eh_data; - if (!data || data->count == 0) { - *bps = NULL; - ret = -EINVAL; - goto out; + if (start < data->count) { + for (i = start; i < data->count; i++) { + if (!data->bps[i].ts) + continue; + + bps[r].bp = data->bps[i].retired_page; + r++; + if (r >= count) + break; + } } + mutex_unlock(&con->recovery_lock); - *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL); - if (!*bps) { - ret = -ENOMEM; - goto out; - } + return r; +} - for (; i < data->count; i++) { - if (!data->bps[i].ts) - continue; +static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev, + struct ras_badpage *bps, uint32_t count, uint32_t start) +{ + struct ras_cmd_bad_pages_info_req cmd_input; + struct ras_cmd_bad_pages_info_rsp *output; + uint32_t group, start_group, end_group; + uint32_t pos, pos_in_group; + int r = 0, i; - (*bps)[i] = (struct ras_badpage){ - .bp = data->bps[i].retired_page, - .size = AMDGPU_GPU_PAGE_SIZE, - .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, - }; + if (!bps || !count) + return -EINVAL; - if (amdgpu_ras_check_critical_address(adev, - data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) - continue; + output = kmalloc(sizeof(*output), GFP_KERNEL); + if (!output) + return -ENOMEM; - status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, - data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT); - if (status == -EBUSY) - (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; - else if (status == -ENOENT) - (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; + memset(&cmd_input, 0, sizeof(cmd_input)); + + start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) / + RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + + pos = start; + for (group = start_group; group < end_group; group++) { + memset(output, 0, sizeof(*output)); + cmd_input.group_index = group; + if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES, + &cmd_input, sizeof(cmd_input), output, sizeof(*output))) + goto out; + + if (pos >= output->bp_total_cnt) + goto out; + + pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + for (i = pos_in_group; i < output->bp_in_group; i++, pos++) { + if (!output->records[i].ts) + continue; + + bps[r].bp = output->records[i].retired_page; + r++; + if (r >= count) + goto out; + } } - *count = con->bad_page_num; out: - mutex_unlock(&con->recovery_lock); - return ret; + kfree(output); + return r; } static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev, @@ -2748,8 +2923,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) type = amdgpu_ras_get_fatal_error_event(adev); list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) { - amdgpu_ras_query_err_status(remote_adev); - amdgpu_ras_log_on_err_counter(remote_adev, type); + if (amdgpu_uniras_enabled(remote_adev)) { + amdgpu_ras_mgr_update_ras_ecc(remote_adev); + } else { + amdgpu_ras_query_err_status(remote_adev); + amdgpu_ras_log_on_err_counter(remote_adev, type); + } } } @@ -2837,8 +3016,13 @@ static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev, 
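Reviewer note: amdgpu_uniras_badpages_read() above pages through the firmware-side table in fixed-size groups of RAS_CMD_MAX_BAD_PAGES_PER_GROUP records. The index arithmetic it relies on reduces to the small helper below (hypothetical helper, shown only to make the mapping explicit):

/* Illustrative index math for the group-paged reads above: a global
 * bad-page index maps to a group number and an offset inside that group.
 */
static inline void badpage_index_to_group(uint32_t index,
					  uint32_t *group,
					  uint32_t *pos_in_group)
{
	*group = index / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
	*pos_in_group = index % RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
}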
addr_in.ma.err_addr = bps->address; addr_in.ma.socket_id = socket; addr_in.ma.ch_inst = bps->mem_channel; - /* tell RAS TA the node instance is not used */ - addr_in.ma.node_inst = TA_RAS_INV_NODE; + if (!amdgpu_ras_smu_eeprom_supported(adev)) { + /* tell RAS TA the node instance is not used */ + addr_in.ma.node_inst = TA_RAS_INV_NODE; + } else { + addr_in.ma.umc_inst = bps->mcumc_id; + addr_in.ma.node_inst = bps->cu; + } if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) ret = adev->umc.ras->convert_ras_err_addr(adev, err_data, @@ -2981,8 +3165,16 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, int i = 0; enum amdgpu_memory_partition save_nps; - save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; - bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT); + if (!amdgpu_ras_smu_eeprom_supported(adev)) { + save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; + bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT); + } else { + /* if pmfw manages eeprom, save_nps is not stored on eeprom, + * we should always convert mca address into physical address, + * make save_nps different from nps + */ + save_nps = nps + 1; + } if (save_nps == nps) { if (amdgpu_umc_pages_in_a_row(adev, err_data, @@ -3048,7 +3240,8 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, if (from_rom) { /* there is no pa recs in V3, so skip pa recs processing */ - if (control->tbl_hdr.version < RAS_TABLE_VER_V3) { + if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) && + !amdgpu_ras_smu_eeprom_supported(adev)) { for (i = 0; i < pages; i++) { if (control->ras_num_recs - i >= adev->umc.retire_unit) { if ((bps[i].address == bps[i + 1].address) && @@ -3118,7 +3311,13 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, mutex_lock(&con->recovery_lock); control = &con->eeprom_control; data = con->eh_data; - unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs; + if (amdgpu_ras_smu_eeprom_supported(adev)) + unit_num = control->ras_num_recs - + control->ras_num_recs_old; + else + unit_num = data->count / adev->umc.retire_unit - + control->ras_num_recs; + save_count = con->bad_page_num - control->ras_num_bad_pages; mutex_unlock(&con->recovery_lock); @@ -3126,7 +3325,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, *new_cnt = unit_num; /* only new entries are saved */ - if (unit_num > 0) { + if (unit_num && save_count) { /*old asics only save pa to eeprom like before*/ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) { if (amdgpu_ras_eeprom_append(control, @@ -3179,7 +3378,8 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) /*In V3, there is no pa recs, and some cases(when address==0) may be parsed as pa recs, so add verion check to avoid it. 
*/ - if (control->tbl_hdr.version < RAS_TABLE_VER_V3) { + if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) && + !amdgpu_ras_smu_eeprom_supported(adev)) { for (i = 0; i < control->ras_num_recs; i++) { if ((control->ras_num_recs - i) >= adev->umc.retire_unit) { if ((bps[i].address == bps[i + 1].address) && @@ -3590,7 +3790,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) if (!con || amdgpu_sriov_vf(adev)) return 0; + if (amdgpu_uniras_enabled(adev)) + return 0; + control = &con->eeprom_control; + con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev); + ret = amdgpu_ras_eeprom_init(control); control->is_eeprom_valid = !ret; @@ -3751,7 +3956,9 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) mutex_unlock(&con->recovery_lock); amdgpu_ras_critical_region_init(adev); - +#ifdef CONFIG_X86_MCE_AMD + amdgpu_unregister_bad_pages_mca_notifier(adev); +#endif return 0; } /* recovery end */ @@ -3975,7 +4182,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work) atomic_set(&con->ras_ue_count, ue_count); } - pm_runtime_mark_last_busy(dev->dev); Out: pm_runtime_put_autosuspend(dev->dev); } @@ -4584,6 +4790,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_ struct ras_event_state *event_state; int ret = 0; + if (amdgpu_uniras_enabled(adev)) + return 0; + if (type >= RAS_EVENT_TYPE_COUNT) { ret = -EINVAL; goto out; @@ -4634,20 +4843,18 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type return id; } -void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) +int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); enum ras_event_type type = RAS_EVENT_TYPE_FATAL; - u64 event_id; + u64 event_id = RAS_EVENT_INVALID_ID; - if (amdgpu_ras_mark_ras_event(adev, type)) { - dev_err(adev->dev, - "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n"); - return; - } + if (amdgpu_uniras_enabled(adev)) + return 0; - event_id = amdgpu_ras_acquire_event_id(adev, type); + if (!amdgpu_ras_mark_ras_event(adev, type)) + event_id = amdgpu_ras_acquire_event_id(adev, type); RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error" "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); @@ -4656,6 +4863,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; amdgpu_ras_reset_gpu(adev); } + + return -EBUSY; } bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) @@ -4783,6 +4992,28 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) notifier_registered = true; } } +static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev) +{ + int i, j; + + if (!notifier_registered && !mce_adev_list.num_gpu) + return; + for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) { + if (mce_adev_list.devs[i] == adev) + mce_adev_list.devs[i] = NULL; + if (!mce_adev_list.devs[i]) + ++j; + } + + if (j == mce_adev_list.num_gpu) { + mce_adev_list.num_gpu = 0; + /* Unregister x86 notifier with MCE subsystem. 
*/ + if (notifier_registered) { + mce_unregister_decode_chain(&amdgpu_bad_page_nb); + notifier_registered = false; + } + } +} #endif struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) @@ -5408,6 +5639,9 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_ras_mgr_is_rma(adev); + if (!con) return false; @@ -5490,3 +5724,25 @@ bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr return ret; } + +void amdgpu_ras_pre_reset(struct amdgpu_device *adev, + struct list_head *device_list) +{ + struct amdgpu_device *tmp_adev = NULL; + + list_for_each_entry(tmp_adev, device_list, reset_list) { + if (amdgpu_uniras_enabled(tmp_adev)) + amdgpu_ras_mgr_pre_reset(tmp_adev); + } +} + +void amdgpu_ras_post_reset(struct amdgpu_device *adev, + struct list_head *device_list) +{ + struct amdgpu_device *tmp_adev = NULL; + + list_for_each_entry(tmp_adev, device_list, reset_list) { + if (amdgpu_uniras_enabled(tmp_adev)) + amdgpu_ras_mgr_post_reset(tmp_adev); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 6cf0dfd38be8..ff44190d7d98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -503,7 +503,34 @@ struct ras_critical_region { uint64_t size; }; +struct ras_eeprom_table_version { + uint32_t minor : 16; + uint32_t major : 16; +}; + +struct ras_eeprom_smu_funcs { + int (*get_ras_table_version)(struct amdgpu_device *adev, + uint32_t *table_version); + int (*get_badpage_count)(struct amdgpu_device *adev, uint32_t *count, uint32_t timeout); + int (*get_badpage_mca_addr)(struct amdgpu_device *adev, uint16_t index, uint64_t *mca_addr); + int (*set_timestamp)(struct amdgpu_device *adev, uint64_t timestamp); + int (*get_timestamp)(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp); + int (*get_badpage_ipid)(struct amdgpu_device *adev, uint16_t index, uint64_t *ipid); + int (*erase_ras_table)(struct amdgpu_device *adev, uint32_t *result); +}; + +enum ras_smu_feature_flags { + RAS_SMU_FEATURE_BIT__RAS_EEPROM = BIT_ULL(0), +}; + +struct ras_smu_drv { + const struct ras_eeprom_smu_funcs *smu_eeprom_funcs; + void (*ras_smu_feature_flags)(struct amdgpu_device *adev, uint64_t *flags); +}; + struct amdgpu_ras { + void *ras_mgr; /* ras infrastructure */ /* for ras itself. 
*/ uint32_t features; @@ -590,6 +617,10 @@ struct amdgpu_ras { /* Protect poison injection */ struct mutex poison_lock; + + /* Disable/Enable uniras switch */ + bool uniras_enabled; + const struct ras_smu_drv *ras_smu_drv; }; struct ras_fs_data { @@ -909,7 +940,7 @@ static inline void amdgpu_ras_intr_cleared(void) atomic_set(&amdgpu_ras_in_intr, 0); } -void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); +int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready); @@ -1008,4 +1039,9 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id, const char *fmt, ...); bool amdgpu_ras_is_rma(struct amdgpu_device *adev); + +void amdgpu_ras_pre_reset(struct amdgpu_device *adev, + struct list_head *device_list); +void amdgpu_ras_post_reset(struct amdgpu_device *adev, + struct list_head *device_list); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 3eb3fb55ccb0..64dd7a81bff5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -32,6 +32,7 @@ #include <linux/uaccess.h> #include "amdgpu_reset.h" +#include "amdgpu_ras_mgr.h" /* These are memory addresses as would be seen by one or more EEPROM * chips strung on the I2C bus, usually by manipulating pins 1-3 of a @@ -123,6 +124,8 @@ RAS_TABLE_V2_1_INFO_SIZE) \ / RAS_TABLE_RECORD_SIZE) +#define RAS_SMU_MESSAGE_TIMEOUT_MS 1000 /* 1s */ + /* Given a zero-based index of an EEPROM RAS record, yields the EEPROM * offset off of RAS_TABLE_START. That is, this is something you can * add to control->i2c_address, and then tell I2C layer to read @@ -443,40 +446,57 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + u32 erase_res = 0; u8 csum; int res; mutex_lock(&control->ras_tbl_mutex); - hdr->header = RAS_TABLE_HDR_VAL; - amdgpu_ras_set_eeprom_table_version(control); + if (!amdgpu_ras_smu_eeprom_supported(adev)) { + hdr->header = RAS_TABLE_HDR_VAL; + amdgpu_ras_set_eeprom_table_version(control); - if (hdr->version >= RAS_TABLE_VER_V2_1) { - hdr->first_rec_offset = RAS_RECORD_START_V2_1; - hdr->tbl_size = RAS_TABLE_HEADER_SIZE + - RAS_TABLE_V2_1_INFO_SIZE; - rai->rma_status = GPU_HEALTH_USABLE; - /** - * GPU health represented as a percentage. - * 0 means worst health, 100 means fully health. - */ - rai->health_percent = 100; - /* ecc_page_threshold = 0 means disable bad page retirement */ - rai->ecc_page_threshold = con->bad_page_cnt_threshold; + if (hdr->version >= RAS_TABLE_VER_V2_1) { + hdr->first_rec_offset = RAS_RECORD_START_V2_1; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE; + rai->rma_status = GPU_HEALTH_USABLE; + + control->ras_record_offset = RAS_RECORD_START_V2_1; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1; + /** + * GPU health represented as a percentage. + * 0 means worst health, 100 means fully health. 
+ */ + rai->health_percent = 100; + /* ecc_page_threshold = 0 means disable bad page retirement */ + rai->ecc_page_threshold = con->bad_page_cnt_threshold; + } else { + hdr->first_rec_offset = RAS_RECORD_START; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE; + + control->ras_record_offset = RAS_RECORD_START; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + } + + csum = __calc_hdr_byte_sum(control); + if (hdr->version >= RAS_TABLE_VER_V2_1) + csum += __calc_ras_info_byte_sum(control); + csum = -csum; + hdr->checksum = csum; + res = __write_table_header(control); + if (!res && hdr->version > RAS_TABLE_VER_V1) + res = __write_table_ras_info(control); } else { - hdr->first_rec_offset = RAS_RECORD_START; - hdr->tbl_size = RAS_TABLE_HEADER_SIZE; + res = amdgpu_ras_smu_erase_ras_table(adev, &erase_res); + if (res || erase_res) { + dev_warn(adev->dev, "RAS EEPROM reset failed, res:%d result:%d", + res, erase_res); + if (!res) + res = -EIO; + } } - csum = __calc_hdr_byte_sum(control); - if (hdr->version >= RAS_TABLE_VER_V2_1) - csum += __calc_ras_info_byte_sum(control); - csum = -csum; - hdr->checksum = csum; - res = __write_table_header(control); - if (!res && hdr->version > RAS_TABLE_VER_V1) - res = __write_table_ras_info(control); - control->ras_num_recs = 0; control->ras_num_bad_pages = 0; control->ras_num_mca_recs = 0; @@ -556,6 +576,9 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_ras_mgr_check_eeprom_safety_watermark(adev); + if (!__is_ras_eeprom_supported(adev) || !amdgpu_bad_page_threshold) return false; @@ -766,7 +789,8 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) "Saved bad pages %d reaches threshold value %d\n", control->ras_num_bad_pages, ras->bad_page_cnt_threshold); - if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev)) + if (adev->cper.enabled && !amdgpu_uniras_enabled(adev) && + amdgpu_cper_generate_bp_threshold_record(adev)) dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); if ((amdgpu_bad_page_threshold != -1) && @@ -849,6 +873,71 @@ Out: return res; } +int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + int ret, retry = 20; + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return 0; + + control->ras_num_recs_old = control->ras_num_recs; + + do { + /* 1000ms timeout is long enough, smu_get_badpage_count won't + * return -EBUSY before timeout. + */ + ret = amdgpu_ras_smu_get_badpage_count(adev, + &(control->ras_num_recs), RAS_SMU_MESSAGE_TIMEOUT_MS); + if (!ret && + (control->ras_num_recs_old == control->ras_num_recs)) { + /* record number update in PMFW needs some time, + * smu_get_badpage_count may return immediately without + * count update, sleep for a while and retry again. 
+ */ + msleep(50); + retry--; + } else { + break; + } + } while (retry); + + /* no update of record number is not a real failure, + * don't print warning here + */ + if (!ret && (control->ras_num_recs_old == control->ras_num_recs)) + ret = -EINVAL; + + return ret; +} + +static int amdgpu_ras_smu_eeprom_append(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev) || !con) + return 0; + + control->ras_num_bad_pages = con->bad_page_num; + + if (amdgpu_bad_page_threshold != 0 && + control->ras_num_bad_pages > con->bad_page_cnt_threshold) { + dev_warn(adev->dev, + "Saved bad pages %d reaches threshold value %d\n", + control->ras_num_bad_pages, con->bad_page_cnt_threshold); + + if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev)) + dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); + + if ((amdgpu_bad_page_threshold != -1) && + (amdgpu_bad_page_threshold != -2)) + con->is_rma = true; + } + + return 0; +} + /** * amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table * @control: pointer to control structure @@ -873,6 +962,9 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, if (!__is_ras_eeprom_supported(adev)) return 0; + if (amdgpu_ras_smu_eeprom_supported(adev)) + return amdgpu_ras_smu_eeprom_append(control); + if (num == 0) { dev_err(adev->dev, "will not append 0 records\n"); return -EINVAL; @@ -948,6 +1040,50 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, return res; } +int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, u32 rec_idx, + const u32 num) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + uint64_t ts, end_idx; + int i, ret; + u64 mca, ipid; + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return 0; + + if (!adev->umc.ras || !adev->umc.ras->mca_ipid_parse) + return -EOPNOTSUPP; + + end_idx = rec_idx + num; + for (i = rec_idx; i < end_idx; i++) { + ret = amdgpu_ras_smu_get_badpage_mca_addr(adev, i, &mca); + if (ret) + return ret; + + ret = amdgpu_ras_smu_get_badpage_ipid(adev, i, &ipid); + if (ret) + return ret; + + ret = amdgpu_ras_smu_get_timestamp(adev, i, &ts); + if (ret) + return ret; + + record[i - rec_idx].address = mca; + /* retired_page (pa) is unused now */ + record[i - rec_idx].retired_page = 0x1ULL; + record[i - rec_idx].ts = ts; + record[i - rec_idx].err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + + adev->umc.ras->mca_ipid_parse(adev, ipid, + (uint32_t *)&(record[i - rec_idx].cu), + (uint32_t *)&(record[i - rec_idx].mem_channel), + (uint32_t *)&(record[i - rec_idx].mcumc_id), NULL); + } + + return 0; +} + /** * amdgpu_ras_eeprom_read -- read EEPROM * @control: pointer to control structure @@ -969,6 +1105,9 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, u8 *buf, *pp; u32 g0, g1; + if (amdgpu_ras_smu_eeprom_supported(adev)) + return amdgpu_ras_eeprom_read_idx(control, record, 0, num); + if (!__is_ras_eeprom_supported(adev)) return 0; @@ -1140,6 +1279,10 @@ static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf, int res = -EFAULT; size_t data_len; + /* pmfw manages eeprom data by itself */ + if (amdgpu_ras_smu_eeprom_supported(adev)) + return 0; + mutex_lock(&control->ras_tbl_mutex); /* We want *pos - data_len > 0, which means there's @@ -1370,6 +1513,42 @@ Out: return res == 
RAS_TABLE_V2_1_INFO_SIZE ? 0 : res; } +static int amdgpu_ras_smu_eeprom_init(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + uint64_t local_time; + int res; + + ras->is_rma = false; + + if (!__is_ras_eeprom_supported(adev)) + return 0; + mutex_init(&control->ras_tbl_mutex); + + res = amdgpu_ras_smu_get_table_version(adev, &(hdr->version)); + if (res) + return res; + + res = amdgpu_ras_smu_get_badpage_count(adev, + &(control->ras_num_recs), 100); + if (res) + return res; + + local_time = (uint64_t)ktime_get_real_seconds(); + res = amdgpu_ras_smu_set_timestamp(adev, local_time); + if (res) + return res; + + control->ras_max_record_count = 4000; + + control->ras_num_mca_recs = 0; + control->ras_num_pa_recs = 0; + + return 0; +} + int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) { struct amdgpu_device *adev = to_amdgpu_device(control); @@ -1378,6 +1557,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); int res; + if (amdgpu_ras_smu_eeprom_supported(adev)) + return amdgpu_ras_smu_eeprom_init(control); + ras->is_rma = false; if (!__is_ras_eeprom_supported(adev)) @@ -1444,6 +1626,47 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) return 0; } +static int amdgpu_ras_smu_eeprom_check(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (!__is_ras_eeprom_supported(adev)) + return 0; + + control->ras_num_bad_pages = ras->bad_page_num; + + if ((ras->bad_page_cnt_threshold < control->ras_num_bad_pages) && + amdgpu_bad_page_threshold != 0) { + dev_warn(adev->dev, + "RAS records:%d exceed threshold:%d\n", + control->ras_num_bad_pages, ras->bad_page_cnt_threshold); + if ((amdgpu_bad_page_threshold == -1) || + (amdgpu_bad_page_threshold == -2)) { + dev_warn(adev->dev, + "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n"); + } else { + ras->is_rma = true; + dev_warn(adev->dev, + "User defined threshold is set, runtime service will be halt when threshold is reached\n"); + } + + return 0; + } + + dev_dbg(adev->dev, + "Found existing EEPROM table with %d records", + control->ras_num_bad_pages); + + /* Warn if we are at 90% of the threshold or above + */ + if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold) + dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d", + control->ras_num_bad_pages, + ras->bad_page_cnt_threshold); + return 0; +} + int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) { struct amdgpu_device *adev = to_amdgpu_device(control); @@ -1451,6 +1674,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); int res = 0; + if (amdgpu_ras_smu_eeprom_supported(adev)) + return amdgpu_ras_smu_eeprom_check(control); + if (!__is_ras_eeprom_supported(adev)) return 0; @@ -1541,7 +1767,8 @@ void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev) struct amdgpu_ras_eeprom_control *control; int res; - if (!__is_ras_eeprom_supported(adev) || !ras) + if (!__is_ras_eeprom_supported(adev) || !ras || + amdgpu_ras_smu_eeprom_supported(adev)) return; control = &ras->eeprom_control; if (!control->is_eeprom_valid) @@ -1561,4 +1788,143 @@ 
void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev) control->is_eeprom_valid = false; } return; -}
\ No newline at end of file +} + +static const struct ras_smu_drv *amdgpu_ras_get_smu_ras_drv(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (!ras) + return NULL; + + return ras->ras_smu_drv; +} + +static uint64_t amdgpu_ras_smu_get_feature_flags(struct amdgpu_device *adev) +{ + const struct ras_smu_drv *ras_smu_drv = amdgpu_ras_get_smu_ras_drv(adev); + uint64_t flags = 0ULL; + + if (!ras_smu_drv) + goto out; + + if (ras_smu_drv->ras_smu_feature_flags) + ras_smu_drv->ras_smu_feature_flags(adev, &flags); + +out: + return flags; +} + +bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + uint64_t flags = 0ULL; + + if (!__is_ras_eeprom_supported(adev) || !smu_ras_drv) + return false; + + if (!smu_ras_drv->smu_eeprom_funcs) + return false; + + flags = amdgpu_ras_smu_get_feature_flags(adev); + + return !!(flags & RAS_SMU_FEATURE_BIT__RAS_EEPROM); +} + +int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev, + uint32_t *table_version) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_ras_table_version) + return smu_ras_drv->smu_eeprom_funcs->get_ras_table_version(adev, + table_version); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev, + uint32_t *count, uint32_t timeout) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_badpage_count) + return smu_ras_drv->smu_eeprom_funcs->get_badpage_count(adev, + count, timeout); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev, + uint16_t index, uint64_t *mca_addr) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr) + return smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr(adev, + index, mca_addr); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev, + uint64_t timestamp) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->set_timestamp) + return smu_ras_drv->smu_eeprom_funcs->set_timestamp(adev, + timestamp); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_timestamp) + return smu_ras_drv->smu_eeprom_funcs->get_timestamp(adev, + index, timestamp); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev, + uint16_t index, uint64_t *ipid) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid) + return smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid(adev, + index, ipid); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev, + uint32_t 
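Reviewer note: each amdgpu_ras_smu_*() wrapper added above follows the same dispatch shape, so a future firmware-backed operation would be wired up identically. A condensed form of that pattern (shown with the badpage-count callback from the hunk; the function below is illustrative only):

/* Condensed form of the wrapper pattern above: return -EOPNOTSUPP unless
 * SMU-managed EEPROM is supported and the specific callback is provided,
 * otherwise forward to the SMU driver's implementation.
 */
static int ras_smu_wrapper_pattern(struct amdgpu_device *adev,
				   uint32_t *count, uint32_t timeout)
{
	const struct ras_smu_drv *drv = amdgpu_ras_get_smu_ras_drv(adev);

	if (!amdgpu_ras_smu_eeprom_supported(adev))
		return -EOPNOTSUPP;

	if (drv->smu_eeprom_funcs->get_badpage_count)
		return drv->smu_eeprom_funcs->get_badpage_count(adev, count,
								timeout);
	return -EOPNOTSUPP;
}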
*result) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->erase_ras_table) + return smu_ras_drv->smu_eeprom_funcs->erase_ras_table(adev, + result); + return -EOPNOTSUPP; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index ebfca4cb5688..2e5d63957e71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -82,6 +82,7 @@ struct amdgpu_ras_eeprom_control { /* Number of records in the table. */ u32 ras_num_recs; + u32 ras_num_recs_old; /* the bad page number is ras_num_recs or * ras_num_recs * umc.retire_unit @@ -163,6 +164,35 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control); void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev); +bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev); + +int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev, + uint32_t *table_version); + +int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev, + uint32_t *count, uint32_t timeout); + +int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev, + uint16_t index, uint64_t *mca_addr); + +int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev, + uint64_t timestamp); + +int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp); + +int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev, + uint16_t index, uint64_t *ipid); + +int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev, + uint32_t *result); + +int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, u32 rec_idx, + const u32 num); + +int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control); + extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops; extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 5ec5c3ff22bb..c596b6df2e2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -33,6 +33,7 @@ #include <drm/amdgpu_drm.h> #include "amdgpu.h" +#include "amdgpu_ras_mgr.h" #include "atom.h" /* @@ -159,8 +160,16 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - while (ib->length_dw & ring->funcs->align_mask) - ib->ptr[ib->length_dw++] = ring->funcs->nop; + u32 align_mask = ring->funcs->align_mask; + u32 count = ib->length_dw & align_mask; + + if (count) { + count = align_mask + 1 - count; + + memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count); + + ib->length_dw += count; + } } /** @@ -460,9 +469,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, ktime_t deadline; bool ret; - if (unlikely(ring->adev->debug_disable_soft_recovery)) - return false; - deadline = ktime_add_us(ktime_get(), 10000); if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence) @@ -490,6 +496,66 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, */ #if defined(CONFIG_DEBUG_FS) +static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf, + size_t size, loff_t *offset) +{ + const uint8_t ring_header_size = 12; + struct 
amdgpu_ring *ring = file_inode(f)->i_private; + struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) = + kzalloc(sizeof(struct ras_cmd_cper_snapshot_req), GFP_KERNEL); + struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) = + kzalloc(sizeof(struct ras_cmd_cper_snapshot_rsp), GFP_KERNEL); + struct ras_cmd_cper_record_req *record_req __free(kfree) = + kzalloc(sizeof(struct ras_cmd_cper_record_req), GFP_KERNEL); + struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) = + kzalloc(sizeof(struct ras_cmd_cper_record_rsp), GFP_KERNEL); + uint8_t *ring_header __free(kfree) = + kzalloc(ring_header_size, GFP_KERNEL); + uint32_t total_cper_num; + uint64_t start_cper_id; + int r; + + if (!snapshot_req || !snapshot_rsp || !record_req || !record_rsp || + !ring_header) + return -ENOMEM; + + if (!(*offset)) { + /* Need at least 12 bytes for the header on the first read */ + if (size < ring_header_size) + return -EINVAL; + + if (copy_to_user(buf, ring_header, ring_header_size)) + return -EFAULT; + buf += ring_header_size; + size -= ring_header_size; + } + + r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, + RAS_CMD__GET_CPER_SNAPSHOT, + snapshot_req, sizeof(struct ras_cmd_cper_snapshot_req), + snapshot_rsp, sizeof(struct ras_cmd_cper_snapshot_rsp)); + if (r || !snapshot_rsp->total_cper_num) + return r; + + start_cper_id = snapshot_rsp->start_cper_id; + total_cper_num = snapshot_rsp->total_cper_num; + + record_req->buf_ptr = (uint64_t)(uintptr_t)buf; + record_req->buf_size = size; + record_req->cper_start_id = start_cper_id + *offset; + record_req->cper_num = total_cper_num; + r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD, + record_req, sizeof(struct ras_cmd_cper_record_req), + record_rsp, sizeof(struct ras_cmd_cper_record_rsp)); + if (r) + return r; + + r = *offset ? 
record_rsp->real_data_size : record_rsp->real_data_size + ring_header_size; + (*offset) += record_rsp->real_cper_num; + + return r; +} + /* Layout of file is 12 bytes consisting of * - rptr * - wptr @@ -506,6 +572,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, loff_t i; int r; + if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev)) + return amdgpu_ras_cper_debugfs_read(f, buf, size, pos); + if (*pos & 3 || size & 3) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4b46e3c26ff3..7a27c6c4bb44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -83,6 +83,7 @@ enum amdgpu_ring_type { AMDGPU_RING_TYPE_MES, AMDGPU_RING_TYPE_UMSCH_MM, AMDGPU_RING_TYPE_CPER, + AMDGPU_RING_TYPE_MAX, }; enum amdgpu_ib_pool_type { @@ -147,16 +148,14 @@ struct amdgpu_fence { u64 wptr; /* fence context for resets */ u64 context; - uint32_t seq; }; extern const struct drm_sched_backend_ops amdgpu_sched_ops; -void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af); -void amdgpu_fence_save_wptr(struct dma_fence *fence); +void amdgpu_fence_save_wptr(struct amdgpu_fence *af); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, @@ -166,8 +165,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev); int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, - struct amdgpu_fence *af, unsigned int flags); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af, + unsigned int flags); int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, uint32_t timeout); bool amdgpu_fence_process(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c index 41ebe690eeff..3739be1b71e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c @@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u dev_err(adev->dev, "Invalid input: %s\n", str); } - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index aa9ee5dffa45..2b931e855abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -188,7 +188,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, struct amdgpu_job *job; void *cpu_addr; uint64_t flags; - unsigned int i; int r; BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < @@ -255,16 +254,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr); } else { - dma_addr_t dma_address; - - dma_address = mm_cur->start; - dma_address += adev->vm_manager.vram_base_offset; + u64 pa = mm_cur->start + 
adev->vm_manager.vram_base_offset; - for (i = 0; i < num_pages; ++i) { - amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address, - flags, cpu_addr); - dma_address += PAGE_SIZE; - } + amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr); } dma_fence_put(amdgpu_job_submit(job)); @@ -286,12 +278,13 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, * move and different for a BO to BO copy. * */ -int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - const struct amdgpu_copy_mem *src, - const struct amdgpu_copy_mem *dst, - uint64_t size, bool tmz, - struct dma_resv *resv, - struct dma_fence **f) +__attribute__((nonnull)) +static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, + const struct amdgpu_copy_mem *src, + const struct amdgpu_copy_mem *dst, + uint64_t size, bool tmz, + struct dma_resv *resv, + struct dma_fence **f) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_res_cursor src_mm, dst_mm; @@ -365,9 +358,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, } error: mutex_unlock(&adev->mman.gtt_window_lock); - if (f) - *f = dma_fence_get(fence); - dma_fence_put(fence); + *f = fence; return r; } @@ -706,10 +697,11 @@ struct amdgpu_ttm_tt { * memory and start HMM tracking CPU page table update * * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only - * once afterwards to stop HMM tracking + * once afterwards to stop HMM tracking. Its the caller responsibility to ensure + * that range is a valid memory and it is freed too. */ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range) + struct amdgpu_hmm_range *range) { struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); @@ -719,9 +711,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, bool readonly; int r = 0; - /* Make sure get_user_pages_done() can cleanup gracefully */ - *range = NULL; - mm = bo->notifier.mm; if (unlikely(!mm)) { DRM_DEBUG_DRIVER("BO is not registered?\n"); @@ -756,38 +745,6 @@ out_unlock: return r; } -/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations - */ -void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range) -{ - struct amdgpu_ttm_tt *gtt = (void *)ttm; - - if (gtt && gtt->userptr && range) - amdgpu_hmm_range_get_pages_done(range); -} - -/* - * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change - * Check if the pages backing this ttm range have been invalidated - * - * Returns: true if pages are still valid - */ -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range) -{ - struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); - - if (!gtt || !gtt->userptr || !range) - return false; - - DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n", - gtt->userptr, ttm->num_pages); - - WARN_ONCE(!range->hmm_pfns, "No user pages to check\n"); - - return !amdgpu_hmm_range_get_pages_done(range); -} #endif /* @@ -797,12 +754,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, * that backs user memory and will ultimately be mapped into the device * address space. */ -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range) +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range) { unsigned long i; for (i = 0; i < ttm->num_pages; ++i) - ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL; + ttm->pages[i] = range ? 
hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL; } /* @@ -1372,7 +1329,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) mem->mem_type == AMDGPU_PL_MMIO_REMAP)) { flags |= AMDGPU_PTE_SYSTEM; - if (ttm->caching == ttm_cached) + if (ttm && ttm->caching == ttm_cached) flags |= AMDGPU_PTE_SNOOPED; } @@ -1529,6 +1486,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, if (r) goto out; + mutex_lock(&adev->mman.gtt_window_lock); amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; @@ -1543,6 +1501,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, WARN_ON(job->ibs[0].length_dw > num_dw); fence = amdgpu_job_submit(job); + mutex_unlock(&adev->mman.gtt_window_lock); if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout)) r = -ETIMEDOUT; @@ -1804,18 +1763,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; } - if (!adev->gmc.is_app_apu) { - ret = amdgpu_bo_create_kernel_at( - adev, adev->gmc.real_vram_size - reserve_size, - reserve_size, &adev->mman.fw_reserved_memory, NULL); - if (ret) { - dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); - amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, - NULL, NULL); - return ret; - } - } else { - DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n"); + ret = amdgpu_bo_create_kernel_at( + adev, adev->gmc.real_vram_size - reserve_size, reserve_size, + &adev->mman.fw_reserved_memory, NULL); + if (ret) { + dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, + NULL); + return ret; } return 0; @@ -1837,7 +1792,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev) for (i = 0; i < adev->gmc.num_mem_partitions; i++) { ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev, adev->gmc.mem_partitions[i].numa.node, - false, false); + TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M))); } return 0; } @@ -1930,8 +1885,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, adev_to_drm(adev)->vma_offset_manager, - adev->need_swiotlb, - dma_addressing_limited(adev->dev)); + (adev->need_swiotlb ? + TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) | + (dma_addressing_limited(adev->dev) ? + TTM_ALLOCATION_POOL_USE_DMA32 : 0) | + TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M))); if (r) { dev_err(adev->dev, "failed initializing buffer object driver(%d).\n", r); @@ -1980,19 +1938,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; /* - *The reserved vram for driver must be pinned to the specified - *place on the VRAM, so reserve it early. + * The reserved VRAM for the driver must be pinned to a specific + * location in VRAM, so reserve it early. */ r = amdgpu_ttm_drv_reserve_vram_init(adev); if (r) return r; /* - * only NAVI10 and onwards ASIC support for IP discovery. - * If IP discovery enabled, a block of memory should be - * reserved for IP discovey. + * only NAVI10 and later ASICs support IP discovery. + * If IP discovery is enabled, a block of memory should be + * reserved for it. 
*/ - if (adev->mman.discovery_bin) { + if (adev->discovery.reserve_tmr) { r = amdgpu_ttm_reserve_tmr(adev); if (r) return r; @@ -2229,8 +2187,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) } else { drm_sched_entity_destroy(&adev->mman.high_pr); drm_sched_entity_destroy(&adev->mman.low_pr); - dma_fence_put(man->move); - man->move = NULL; + /* Drop all the old fences since re-creating the scheduler entities + * will allocate new contexts. + */ + ttm_resource_manager_cleanup(man); } /* this just adjusts TTM size idea, which sets lpfn to the correct value */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 0be2728aa872..577ee04ce0bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -28,6 +28,7 @@ #include <drm/gpu_scheduler.h> #include <drm/ttm/ttm_placement.h> #include "amdgpu_vram_mgr.h" +#include "amdgpu_hmm.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) @@ -82,9 +83,6 @@ struct amdgpu_mman { uint64_t stolen_reserved_offset; uint64_t stolen_reserved_size; - /* discovery */ - uint8_t *discovery_bin; - uint32_t discovery_tmr_size; /* fw reserved memory */ struct amdgpu_bo *fw_reserved_memory; struct amdgpu_bo *fw_reserved_memory_extend; @@ -170,12 +168,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, struct dma_resv *resv, struct dma_fence **fence, bool direct_submit, bool vm_needs_flush, uint32_t copy_flags); -int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - const struct amdgpu_copy_mem *src, - const struct amdgpu_copy_mem *dst, - uint64_t size, bool tmz, - struct dma_resv *resv, - struct dma_fence **f); int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, struct dma_resv *resv, struct dma_fence **fence); @@ -192,29 +184,16 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range); -void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range); -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range); + struct amdgpu_hmm_range *range); #else static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range) + struct amdgpu_hmm_range *range) { return -EPERM; } -static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range) -{ -} -static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range) -{ - return false; -} #endif -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range); +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range); int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo, uint64_t *user_addr); int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 2e039fb778ea..3f0b0e9af4f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -24,6 +24,7 @@ #include <linux/sort.h> #include "amdgpu.h" #include "umc_v6_7.h" +#include "amdgpu_ras_mgr.h" #define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms #define MAX_UMC_HASH_STRING_SIZE 256 @@ -96,67 +97,96 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, { struct ras_err_data *err_data = (struct ras_err_data 
*)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control = &con->eeprom_control; unsigned int error_query_mode; int ret = 0; unsigned long err_count; amdgpu_ras_get_error_query_mode(adev, &error_query_mode); + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if (!err_data->err_addr) + dev_warn(adev->dev, + "Failed to alloc memory for umc error address record!\n"); + else + err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query; + mutex_lock(&con->page_retirement_lock); - ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc)); - if (ret == -EOPNOTSUPP && - error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { - if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && - adev->umc.ras->ras_block.hw_ops->query_ras_error_count) - adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status); - - if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && - adev->umc.ras->ras_block.hw_ops->query_ras_error_address && - adev->umc.max_ras_err_cnt_per_query) { - err_data->err_addr = - kcalloc(adev->umc.max_ras_err_cnt_per_query, - sizeof(struct eeprom_table_record), GFP_KERNEL); - - /* still call query_ras_error_address to clear error status - * even NOMEM error is encountered - */ - if(!err_data->err_addr) - dev_warn(adev->dev, "Failed to alloc memory for " - "umc error address record!\n"); - else - err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query; - - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status); + if (!amdgpu_ras_smu_eeprom_supported(adev)) { + ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc)); + if (ret == -EOPNOTSUPP && + error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { + if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && + adev->umc.ras->ras_block.hw_ops->query_ras_error_count) + adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, + ras_error_status); + + if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && + adev->umc.ras->ras_block.hw_ops->query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if (!err_data->err_addr) + dev_warn(adev->dev, + "Failed to alloc memory for umc error address record!\n"); + else + err_data->err_addr_len = + adev->umc.max_ras_err_cnt_per_query; + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, + ras_error_status); + } + } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY || + (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) { + if (adev->umc.ras && + adev->umc.ras->ecc_info_query_ras_error_count) + adev->umc.ras->ecc_info_query_ras_error_count(adev, + ras_error_status); + + if (adev->umc.ras && + adev->umc.ras->ecc_info_query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even 
NOMEM error is encountered + */ + if (!err_data->err_addr) + dev_warn(adev->dev, + "Failed to alloc memory for umc error address record!\n"); + else + err_data->err_addr_len = + adev->umc.max_ras_err_cnt_per_query; + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras->ecc_info_query_ras_error_address(adev, + ras_error_status); + } } - } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY || - (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) { - if (adev->umc.ras && - adev->umc.ras->ecc_info_query_ras_error_count) - adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status); - - if (adev->umc.ras && - adev->umc.ras->ecc_info_query_ras_error_address && - adev->umc.max_ras_err_cnt_per_query) { - err_data->err_addr = - kcalloc(adev->umc.max_ras_err_cnt_per_query, - sizeof(struct eeprom_table_record), GFP_KERNEL); - - /* still call query_ras_error_address to clear error status - * even NOMEM error is encountered - */ - if(!err_data->err_addr) - dev_warn(adev->dev, "Failed to alloc memory for " - "umc error address record!\n"); - else - err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query; - - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status); + } else { + if (!amdgpu_ras_eeprom_update_record_num(control)) { + err_data->err_addr_cnt = err_data->de_count = + control->ras_num_recs - control->ras_num_recs_old; + amdgpu_ras_eeprom_read_idx(control, err_data->err_addr, + control->ras_num_recs_old, err_data->de_count); } } @@ -166,7 +196,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, if ((amdgpu_bad_page_threshold != 0) && err_data->err_addr_cnt) { amdgpu_ras_add_bad_pages(adev, err_data->err_addr, - err_data->err_addr_cnt, false); + err_data->err_addr_cnt, amdgpu_ras_smu_eeprom_supported(adev)); amdgpu_ras_save_bad_pages(adev, &err_count); amdgpu_dpm_send_hbm_bad_pages_num(adev, @@ -244,6 +274,15 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev, } amdgpu_ras_error_data_fini(&err_data); + } else if (amdgpu_uniras_enabled(adev)) { + struct ras_ih_info ih_info = {0}; + + ih_info.block = block; + ih_info.pasid = pasid; + ih_info.reset = reset; + ih_info.pasid_fn = pasid_fn; + ih_info.data = data; + amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info); } else { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); int ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index ec203f9e5ffa..28dff750c47e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -113,6 +113,8 @@ struct amdgpu_umc_ras { uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev, uint64_t mca_addr, uint64_t retired_page); void (*get_retire_flip_bits)(struct amdgpu_device *adev); + void (*mca_ipid_parse)(struct amdgpu_device *adev, uint64_t ipid, + uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid); }; struct amdgpu_umc_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 1add21160d21..9a969175900e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -25,10 +25,13 @@ #include <drm/drm_auth.h> #include <drm/drm_exec.h> #include <linux/pm_runtime.h> +#include <drm/drm_drv.h> #include "amdgpu.h" +#include "amdgpu_reset.h" #include "amdgpu_vm.h" #include "amdgpu_userq.h" +#include 
"amdgpu_hmm.h" #include "amdgpu_userq_fence.h" u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) @@ -44,10 +47,130 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) return userq_ip_mask; } -int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, - u64 expected_size) +static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev, + enum amdgpu_ring_type ring_type, int reset_type) +{ + + if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX) + return false; + + switch (ring_type) { + case AMDGPU_RING_TYPE_GFX: + if (adev->gfx.gfx_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_COMPUTE: + if (adev->gfx.compute_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_SDMA: + if (adev->sdma.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_DEC: + case AMDGPU_RING_TYPE_VCN_ENC: + if (adev->vcn.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_JPEG: + if (adev->jpeg.supported_reset & reset_type) + return true; + break; + default: + break; + } + return false; +} + +static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev) +{ + if (amdgpu_device_should_recover_gpu(adev)) { + amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->userq_reset_work); + /* Wait for the reset job to complete */ + flush_work(&adev->userq_reset_work); + } +} + +static int +amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const int queue_types[] = { + AMDGPU_RING_TYPE_COMPUTE, + AMDGPU_RING_TYPE_GFX, + AMDGPU_RING_TYPE_SDMA + }; + const int num_queue_types = ARRAY_SIZE(queue_types); + bool gpu_reset = false; + int r = 0; + int i; + + /* Warning if current process mutex is not held */ + WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex)); + + if (unlikely(adev->debug_disable_gpu_ring_reset)) { + dev_err(adev->dev, "userq reset disabled by debug mask\n"); + return 0; + } + + /* + * If GPU recovery feature is disabled system-wide, + * skip all reset detection logic + */ + if (!amdgpu_gpu_recovery) + return 0; + + /* + * Iterate through all queue types to detect and reset problematic queues + * Process each queue type in the defined order + */ + for (i = 0; i < num_queue_types; i++) { + int ring_type = queue_types[i]; + const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type]; + + if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE)) + continue; + + if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 && + funcs && funcs->detect_and_reset) { + r = funcs->detect_and_reset(adev, ring_type); + if (r) { + gpu_reset = true; + break; + } + } + } + + if (gpu_reset) + amdgpu_userq_gpu_reset(adev); + + return r; +} + +static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue, + struct amdgpu_bo_va_mapping *va_map, u64 addr) +{ + struct amdgpu_userq_va_cursor *va_cursor; + struct userq_va_list; + + va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL); + if (!va_cursor) + return -ENOMEM; + + INIT_LIST_HEAD(&va_cursor->list); + va_cursor->gpu_addr = addr; + atomic_set(&va_map->bo_va->userq_va_mapped, 1); + list_add(&va_cursor->list, &queue->userq_va_list); + + return 0; +} + +int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue, + u64 addr, u64 expected_size) { struct amdgpu_bo_va_mapping *va_map; + struct amdgpu_vm *vm = queue->vm; u64 user_addr; u64 size; int r = 0; @@ -67,6 +190,7 @@ int 
amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, /* Only validate the userq whether resident in the VM mapping range */ if (user_addr >= va_map->start && va_map->last - user_addr + 1 >= size) { + amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr); amdgpu_bo_unreserve(vm->root.bo); return 0; } @@ -77,6 +201,76 @@ out_err: return r; } +static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr) +{ + struct amdgpu_bo_va_mapping *mapping; + bool r; + + if (amdgpu_bo_reserve(vm->root.bo, false)) + return false; + + mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); + if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped)) + r = true; + else + r = false; + amdgpu_bo_unreserve(vm->root.bo); + + return r; +} + +static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_userq_va_cursor *va_cursor, *tmp; + int r = 0; + + list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { + r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr); + dev_dbg(queue->userq_mgr->adev->dev, + "validate the userq mapping:%p va:%llx r:%d\n", + queue, va_cursor->gpu_addr, r); + } + + if (r != 0) + return true; + + return false; +} + +static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_userq_va_cursor *va_cursor) +{ + atomic_set(&mapping->bo_va->userq_va_mapped, 0); + list_del(&va_cursor->list); + kfree(va_cursor); +} + +static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_userq_va_cursor *va_cursor, *tmp; + struct amdgpu_bo_va_mapping *mapping; + int r; + + r = amdgpu_bo_reserve(queue->vm->root.bo, false); + if (r) + return r; + + list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { + mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr); + if (!mapping) { + r = -EINVAL; + goto err; + } + dev_dbg(adev->dev, "delete the userq:%p va:%llx\n", + queue, va_cursor->gpu_addr); + amdgpu_userq_buffer_va_list_del(mapping, va_cursor); + } +err: + amdgpu_bo_unreserve(queue->vm->root.bo); + return r; +} + static int amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) @@ -84,17 +278,22 @@ amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_device *adev = uq_mgr->adev; const struct amdgpu_userq_funcs *userq_funcs = adev->userq_funcs[queue->queue_type]; + bool found_hung_queue = false; int r = 0; if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { r = userq_funcs->preempt(uq_mgr, queue); if (r) { queue->state = AMDGPU_USERQ_STATE_HUNG; + found_hung_queue = true; } else { queue->state = AMDGPU_USERQ_STATE_PREEMPTED; } } + if (found_hung_queue) + amdgpu_userq_detect_and_reset_queues(uq_mgr); + return r; } @@ -126,16 +325,23 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_device *adev = uq_mgr->adev; const struct amdgpu_userq_funcs *userq_funcs = adev->userq_funcs[queue->queue_type]; + bool found_hung_queue = false; int r = 0; if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) || (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) { r = userq_funcs->unmap(uq_mgr, queue); - if (r) + if (r) { queue->state = AMDGPU_USERQ_STATE_HUNG; - else + found_hung_queue = true; + } else { queue->state = AMDGPU_USERQ_STATE_UNMAPPED; + } } + + if (found_hung_queue) + amdgpu_userq_detect_and_reset_queues(uq_mgr); + return r; } @@ -152,26 +358,33 @@ amdgpu_userq_map_helper(struct 
amdgpu_userq_mgr *uq_mgr, r = userq_funcs->map(uq_mgr, queue); if (r) { queue->state = AMDGPU_USERQ_STATE_HUNG; + amdgpu_userq_detect_and_reset_queues(uq_mgr); } else { queue->state = AMDGPU_USERQ_STATE_MAPPED; } } + return r; } -static void +static int amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) { struct dma_fence *f = queue->last_fence; - int ret; + int ret = 0; if (f && !dma_fence_is_signaled(f)) { - ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100)); - if (ret <= 0) + ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT); + if (ret <= 0) { drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", f->context, f->seqno); + queue->state = AMDGPU_USERQ_STATE_HUNG; + return -ETIME; + } } + + return ret; } static void @@ -182,16 +395,27 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_device *adev = uq_mgr->adev; const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type]; + /* Wait for mode-1 reset to complete */ + down_read(&adev->reset_domain->sem); + + /* Drop the userq reference. */ + amdgpu_userq_buffer_vas_list_cleanup(adev, queue); uq_funcs->mqd_destroy(uq_mgr, queue); amdgpu_userq_fence_driver_free(queue); - idr_remove(&uq_mgr->userq_idr, queue_id); + /* Use interrupt-safe locking since IRQ handlers may access these XArrays */ + xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id); + xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index); + queue->userq_mgr = NULL; + list_del(&queue->userq_va_list); kfree(queue); + + up_read(&adev->reset_domain->sem); } static struct amdgpu_usermode_queue * amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid) { - return idr_find(&uq_mgr->userq_idr, qid); + return xa_load(&uq_mgr->userq_mgr_xa, qid); } void @@ -319,17 +543,6 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, case AMDGPU_HW_IP_DMA: db_size = sizeof(u64); break; - - case AMDGPU_HW_IP_VCN_ENC: - db_size = sizeof(u32); - db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1; - break; - - case AMDGPU_HW_IP_VPE: - db_size = sizeof(u32); - db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1; - break; - default: drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n", db_info->queue_type); @@ -378,10 +591,11 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) amdgpu_bo_unreserve(queue->db_obj.obj); } amdgpu_bo_unref(&queue->db_obj.obj); - + atomic_dec(&uq_mgr->userq_count[queue->queue_type]); #if defined(CONFIG_DEBUG_FS) debugfs_remove_recursive(queue->debugfs_queue); #endif + amdgpu_userq_detect_and_reset_queues(uq_mgr); r = amdgpu_userq_unmap_helper(uq_mgr, queue); /*TODO: It requires a reset for userq hw unmap error*/ if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) { @@ -391,7 +605,6 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) amdgpu_userq_cleanup(uq_mgr, queue, queue_id); mutex_unlock(&uq_mgr->userq_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -463,8 +676,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) struct amdgpu_db_info db_info; char *queue_name; bool skip_map_queue; + u32 qid; uint64_t index; - int qid, r = 0; + int r = 0; int priority = (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT; @@ -487,7 +701,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) * * This 
will also make sure we have a valid eviction fence ready to be used. */ - mutex_lock(&adev->userq_mutex); amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr); uq_funcs = adev->userq_funcs[args->in.ip_type]; @@ -505,14 +718,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } - /* Validate the userq virtual address.*/ - if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) || - amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || - amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { - r = -EINVAL; - kfree(queue); - goto unlock; - } + INIT_LIST_HEAD(&queue->userq_va_list); queue->doorbell_handle = args->in.doorbell_handle; queue->queue_type = args->in.ip_type; queue->vm = &fpriv->vm; @@ -523,6 +729,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) db_info.db_obj = &queue->db_obj; db_info.doorbell_offset = args->in.doorbell_offset; + /* Validate the userq virtual address.*/ + if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) || + amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || + amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { + r = -EINVAL; + kfree(queue); + goto unlock; + } + /* Convert relative doorbell offset into absolute doorbell index */ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp); if (index == (uint64_t)-EINVAL) { @@ -548,16 +763,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } + /* Wait for mode-1 reset to complete */ + down_read(&adev->reset_domain->sem); + r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL)); + if (r) { + kfree(queue); + up_read(&adev->reset_domain->sem); + goto unlock; + } - qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL); - if (qid < 0) { + r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL); + if (r) { drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n"); amdgpu_userq_fence_driver_free(queue); uq_funcs->mqd_destroy(uq_mgr, queue); kfree(queue); r = -ENOMEM; + up_read(&adev->reset_domain->sem); goto unlock; } + up_read(&adev->reset_domain->sem); + queue->userq_mgr = uq_mgr; /* don't map the queue if scheduling is halted */ if (adev->userq_halt_for_enforce_isolation && @@ -570,7 +796,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) r = amdgpu_userq_map_helper(uq_mgr, queue); if (r) { drm_file_err(uq_mgr->file, "Failed to map Queue\n"); - idr_remove(&uq_mgr->userq_idr, qid); + xa_erase(&uq_mgr->userq_mgr_xa, qid); amdgpu_userq_fence_driver_free(queue); uq_funcs->mqd_destroy(uq_mgr, queue); kfree(queue); @@ -592,10 +818,10 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) kfree(queue_name); args->out.queue_id = qid; + atomic_inc(&uq_mgr->userq_count[queue->queue_type]); unlock: mutex_unlock(&uq_mgr->userq_mutex); - mutex_unlock(&adev->userq_mutex); return r; } @@ -693,11 +919,19 @@ static int amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id; + unsigned long queue_id; int ret = 0, r; /* Resume all the queues for this process */ - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { + + if (!amdgpu_userq_buffer_vas_mapped(queue)) { + 
drm_file_err(uq_mgr->file, + "trying restore queue without va mapping\n"); + queue->state = AMDGPU_USERQ_STATE_INVALID_VA; + continue; + } + r = amdgpu_userq_restore_helper(uq_mgr, queue); if (r) ret = r; @@ -760,12 +994,21 @@ static int amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); + bool invalidated = false, new_addition = false; + struct ttm_operation_ctx ctx = { true, false }; struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_hmm_range *range; struct amdgpu_vm *vm = &fpriv->vm; + unsigned long key, tmp_key; struct amdgpu_bo_va *bo_va; + struct amdgpu_bo *bo; struct drm_exec exec; + struct xarray xa; int ret; + xa_init(&xa); + +retry_lock: drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { ret = amdgpu_vm_lock_pd(vm, &exec, 1); @@ -792,10 +1035,74 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) goto unlock_all; } + if (invalidated) { + xa_for_each(&xa, tmp_key, range) { + bo = range->bo; + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unlock_all; + + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range); + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unlock_all; + } + invalidated = false; + } + ret = amdgpu_vm_handle_moved(adev, vm, NULL); if (ret) goto unlock_all; + key = 0; + /* Validate User Ptr BOs */ + list_for_each_entry(bo_va, &vm->done, base.vm_status) { + bo = bo_va->base.bo; + if (!bo) + continue; + + if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm)) + continue; + + range = xa_load(&xa, key); + if (range && range->bo != bo) { + xa_erase(&xa, key); + amdgpu_hmm_range_free(range); + range = NULL; + } + + if (!range) { + range = amdgpu_hmm_range_alloc(bo); + if (!range) { + ret = -ENOMEM; + goto unlock_all; + } + + xa_store(&xa, key, range, GFP_KERNEL); + new_addition = true; + } + key++; + } + + if (new_addition) { + drm_exec_fini(&exec); + xa_for_each(&xa, tmp_key, range) { + if (!range) + continue; + bo = range->bo; + ret = amdgpu_ttm_tt_get_user_pages(bo, range); + if (ret) + goto unlock_all; + } + + invalidated = true; + new_addition = false; + goto retry_lock; + } + ret = amdgpu_vm_update_pdes(adev, vm, false); if (ret) goto unlock_all; @@ -815,6 +1122,13 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) unlock_all: drm_exec_fini(&exec); + xa_for_each(&xa, tmp_key, range) { + if (!range) + continue; + bo = range->bo; + amdgpu_hmm_range_free(range); + } + xa_destroy(&xa); return ret; } @@ -848,11 +1162,12 @@ static int amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id; + unsigned long queue_id; int ret = 0, r; + amdgpu_userq_detect_and_reset_queues(uq_mgr); /* Try to unmap all the queues in this process ctx */ - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { r = amdgpu_userq_preempt_helper(uq_mgr, queue); if (r) ret = r; @@ -863,13 +1178,31 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) return ret; } +void amdgpu_userq_reset_work(struct work_struct *work) +{ + struct amdgpu_device *adev = container_of(work, struct amdgpu_device, + userq_reset_work); + struct amdgpu_reset_context reset_context; + + memset(&reset_context, 0, sizeof(reset_context)); + + reset_context.method = AMD_RESET_METHOD_NONE; + reset_context.reset_req_dev = adev; + 
reset_context.src = AMDGPU_RESET_SRC_USERQ; + set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + /*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/ + + amdgpu_device_gpu_recover(adev, NULL, &reset_context); +} + static int amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id, ret; + unsigned long queue_id; + int ret; - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { struct dma_fence *f = queue->last_fence; if (!f || dma_fence_is_signaled(f)) @@ -889,22 +1222,19 @@ void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_eviction_fence *ev_fence) { - int ret; struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr; + struct amdgpu_device *adev = uq_mgr->adev; + int ret; /* Wait for any pending userqueue fence work to finish */ ret = amdgpu_userq_wait_for_signal(uq_mgr); - if (ret) { - drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n"); - return; - } + if (ret) + dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n"); ret = amdgpu_userq_evict_all(uq_mgr); - if (ret) { - drm_file_err(uq_mgr->file, "Failed to evict userqueue\n"); - return; - } + if (ret) + dev_err(adev->dev, "Failed to evict userqueue\n"); /* Signal current eviction fence */ amdgpu_eviction_fence_signal(evf_mgr, ev_fence); @@ -922,44 +1252,31 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f struct amdgpu_device *adev) { mutex_init(&userq_mgr->userq_mutex); - idr_init_base(&userq_mgr->userq_idr, 1); + xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC); userq_mgr->adev = adev; userq_mgr->file = file_priv; - mutex_lock(&adev->userq_mutex); - list_add(&userq_mgr->list, &adev->userq_mgr_list); - mutex_unlock(&adev->userq_mutex); - INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker); return 0; } void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) { - struct amdgpu_device *adev = userq_mgr->adev; struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - uint32_t queue_id; + unsigned long queue_id; cancel_delayed_work_sync(&userq_mgr->resume_work); - mutex_lock(&adev->userq_mutex); mutex_lock(&userq_mgr->userq_mutex); - idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { + amdgpu_userq_detect_and_reset_queues(userq_mgr); + xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) { amdgpu_userq_wait_for_last_fence(userq_mgr, queue); amdgpu_userq_unmap_helper(userq_mgr, queue); amdgpu_userq_cleanup(userq_mgr, queue, queue_id); } - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - if (uqm == userq_mgr) { - list_del(&uqm->list); - break; - } - } - idr_destroy(&userq_mgr->userq_idr); + xa_destroy(&userq_mgr->userq_mgr_xa); mutex_unlock(&userq_mgr->userq_mutex); - mutex_unlock(&adev->userq_mutex); mutex_destroy(&userq_mgr->userq_mutex); } @@ -967,57 +1284,51 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev) { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; - int ret = 0, r; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; + int r; if (!ip_mask) return 0; - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; 
cancel_delayed_work_sync(&uqm->resume_work); - mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (adev->in_s0ix) - r = amdgpu_userq_preempt_helper(uqm, queue); - else - r = amdgpu_userq_unmap_helper(uqm, queue); - if (r) - ret = r; - } - mutex_unlock(&uqm->userq_mutex); + guard(mutex)(&uqm->userq_mutex); + amdgpu_userq_detect_and_reset_queues(uqm); + if (adev->in_s0ix) + r = amdgpu_userq_preempt_helper(uqm, queue); + else + r = amdgpu_userq_unmap_helper(uqm, queue); + if (r) + return r; } - mutex_unlock(&adev->userq_mutex); - return ret; + return 0; } int amdgpu_userq_resume(struct amdgpu_device *adev) { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; - int ret = 0, r; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; + int r; if (!ip_mask) return 0; - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (adev->in_s0ix) - r = amdgpu_userq_restore_helper(uqm, queue); - else - r = amdgpu_userq_map_helper(uqm, queue); - if (r) - ret = r; - } - mutex_unlock(&uqm->userq_mutex); + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; + guard(mutex)(&uqm->userq_mutex); + if (adev->in_s0ix) + r = amdgpu_userq_restore_helper(uqm, queue); + else + r = amdgpu_userq_map_helper(uqm, queue); + if (r) + return r; } - mutex_unlock(&adev->userq_mutex); - return ret; + + return 0; } int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, @@ -1025,33 +1336,32 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; int ret = 0, r; /* only need to stop gfx/compute */ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) return 0; - mutex_lock(&adev->userq_mutex); if (adev->userq_halt_for_enforce_isolation) dev_warn(adev->dev, "userq scheduling already stopped!\n"); adev->userq_halt_for_enforce_isolation = true; - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; cancel_delayed_work_sync(&uqm->resume_work); mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (((queue->queue_type == AMDGPU_HW_IP_GFX) || - (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && - (queue->xcp_id == idx)) { - r = amdgpu_userq_preempt_helper(uqm, queue); - if (r) - ret = r; - } + if (((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && + (queue->xcp_id == idx)) { + amdgpu_userq_detect_and_reset_queues(uqm); + r = amdgpu_userq_preempt_helper(uqm, queue); + if (r) + ret = r; } mutex_unlock(&uqm->userq_mutex); } - mutex_unlock(&adev->userq_mutex); + return ret; } @@ -1060,21 +1370,20 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; int ret = 0, r; /* only need to stop gfx/compute */ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << 
AMDGPU_HW_IP_COMPUTE)))) return 0; - mutex_lock(&adev->userq_mutex); if (!adev->userq_halt_for_enforce_isolation) dev_warn(adev->dev, "userq scheduling already started!\n"); adev->userq_halt_for_enforce_isolation = false; - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { @@ -1082,9 +1391,92 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, if (r) ret = r; } - } mutex_unlock(&uqm->userq_mutex); } - mutex_unlock(&adev->userq_mutex); + return ret; } + +int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t saddr) +{ + u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); + struct amdgpu_bo_va *bo_va = mapping->bo_va; + struct dma_resv *resv = bo_va->base.bo->tbo.base.resv; + int ret = 0; + + if (!ip_mask) + return 0; + + dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr); + /* + * The userq VA mapping reservation should include the eviction fence; + * if the eviction fence cannot be signaled successfully during unmapping, + * the driver warns to flag this improper unmap of the userq VA. + * Note: the eviction fence may be attached to different BOs, and this + * unmap only covers one kind of userq VA, so at this point assume + * the eviction fence is always unsignaled. + */ + if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) { + ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true, + MAX_SCHEDULE_TIMEOUT); + if (ret <= 0) + return -EBUSY; + } + + return 0; +} + +void amdgpu_userq_pre_reset(struct amdgpu_device *adev) +{ + const struct amdgpu_userq_funcs *userq_funcs; + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; + + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; + cancel_delayed_work_sync(&uqm->resume_work); + if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { + amdgpu_userq_wait_for_last_fence(uqm, queue); + userq_funcs = adev->userq_funcs[queue->queue_type]; + userq_funcs->unmap(uqm, queue); + /* just mark all queues as hung at this point. + * if unmap succeeds, we could map again + * in amdgpu_userq_post_reset() if vram is not lost + */ + queue->state = AMDGPU_USERQ_STATE_HUNG; + amdgpu_userq_fence_driver_force_completion(queue); + } + } +} + +int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost) +{ + /* if any queue state is AMDGPU_USERQ_STATE_UNMAPPED + * at this point, we should be able to map it again + * and continue if vram is not lost.
+ */ + struct amdgpu_userq_mgr *uqm; + struct amdgpu_usermode_queue *queue; + const struct amdgpu_userq_funcs *userq_funcs; + unsigned long queue_id; + int r = 0; + + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; + if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) { + userq_funcs = adev->userq_funcs[queue->queue_type]; + /* Re-map queue */ + r = userq_funcs->map(uqm, queue); + if (r) { + dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id); + continue; + } + queue->state = AMDGPU_USERQ_STATE_MAPPED; + } + } + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h index c027dd916672..c37444427a14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h @@ -37,6 +37,7 @@ enum amdgpu_userq_state { AMDGPU_USERQ_STATE_MAPPED, AMDGPU_USERQ_STATE_PREEMPTED, AMDGPU_USERQ_STATE_HUNG, + AMDGPU_USERQ_STATE_INVALID_VA, }; struct amdgpu_mqd_prop; @@ -47,6 +48,11 @@ struct amdgpu_userq_obj { struct amdgpu_bo *obj; }; +struct amdgpu_userq_va_cursor { + u64 gpu_addr; + struct list_head list; +}; + struct amdgpu_usermode_queue { int queue_type; enum amdgpu_userq_state state; @@ -66,6 +72,8 @@ struct amdgpu_usermode_queue { u32 xcp_id; int priority; struct dentry *debugfs_queue; + + struct list_head userq_va_list; }; struct amdgpu_userq_funcs { @@ -88,12 +96,17 @@ struct amdgpu_userq_funcs { /* Usermode queues for gfx */ struct amdgpu_userq_mgr { - struct idr userq_idr; + /** + * @userq_mgr_xa: Per-process user queue map (queue ID → queue) + * Key: queue_id (unique ID within the process's userq manager) + * Value: struct amdgpu_usermode_queue + */ + struct xarray userq_mgr_xa; struct mutex userq_mutex; struct amdgpu_device *adev; struct delayed_work resume_work; - struct list_head list; struct drm_file *file; + atomic_t userq_count[AMDGPU_RING_TYPE_MAX]; }; struct amdgpu_db_info { @@ -136,7 +149,13 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx); int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx); - -int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, - u64 expected_size); +void amdgpu_userq_reset_work(struct work_struct *work); +void amdgpu_userq_pre_reset(struct amdgpu_device *adev); +int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost); + +int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue, + u64 addr, u64 expected_size); +int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t saddr); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c index 761bad98da3e..eba9fb359047 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c @@ -151,15 +151,16 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d { struct amdgpu_userq_fence *userq_fence, *tmp; struct dma_fence *fence; + unsigned long flags; u64 rptr; int i; if (!fence_drv) return; + spin_lock_irqsave(&fence_drv->fence_list_lock, flags); rptr = amdgpu_userq_fence_read(fence_drv); - spin_lock(&fence_drv->fence_list_lock); list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) { fence = &userq_fence->base; @@ -174,7 +175,7 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d list_del(&userq_fence->link); 
dma_fence_put(fence); } - spin_unlock(&fence_drv->fence_list_lock); + spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags); } void amdgpu_userq_fence_driver_destroy(struct kref *ref) @@ -386,6 +387,7 @@ static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue, amdgpu_bo_unreserve(queue->vm->root.bo); r = amdgpu_bo_reserve(bo, true); if (r) { + amdgpu_bo_unref(&bo); DRM_ERROR("Failed to reserve userqueue wptr bo"); return r; } @@ -537,7 +539,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data, } /* Retrieve the user queue */ - queue = idr_find(&userq_mgr->userq_idr, args->queue_id); + queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id); if (!queue) { r = -ENOENT; goto put_gobj_write; @@ -899,7 +901,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, */ num_fences = dma_fence_dedup_array(fences, num_fences); - waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id); + waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id); if (!waitq) { r = -EINVAL; goto free_fences; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index ce318f5de047..a7d8f1ce6ac2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -41,6 +41,9 @@ #define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Firmware Names */ +#ifdef CONFIG_DRM_AMDGPU_SI +#define FIRMWARE_VCE_V1_0 "amdgpu/vce_1_0_0.bin" +#endif #ifdef CONFIG_DRM_AMDGPU_CIK #define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin" #define FIRMWARE_KABINI "amdgpu/kabini_vce.bin" @@ -61,6 +64,9 @@ #define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin" #define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin" +#ifdef CONFIG_DRM_AMDGPU_SI +MODULE_FIRMWARE(FIRMWARE_VCE_V1_0); +#endif #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); MODULE_FIRMWARE(FIRMWARE_KABINI); @@ -88,82 +94,93 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct dma_fence **fence); /** - * amdgpu_vce_sw_init - allocate memory, load vce firmware + * amdgpu_vce_firmware_name() - determine the firmware file name for VCE * * @adev: amdgpu_device pointer - * @size: size for the new BO * - * First step to get VCE online, allocate memory and load the firmware + * Each chip that has VCE IP may need a different firmware. + * This function returns the name of the VCE firmware file + * appropriate for the current chip. 
*/ -int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) +static const char *amdgpu_vce_firmware_name(struct amdgpu_device *adev) { - const char *fw_name; - const struct common_firmware_header *hdr; - unsigned int ucode_version, version_major, version_minor, binary_id; - int i, r; - switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_PITCAIRN: + case CHIP_TAHITI: + case CHIP_VERDE: + return FIRMWARE_VCE_V1_0; +#endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: - fw_name = FIRMWARE_BONAIRE; - break; + return FIRMWARE_BONAIRE; case CHIP_KAVERI: - fw_name = FIRMWARE_KAVERI; - break; + return FIRMWARE_KAVERI; case CHIP_KABINI: - fw_name = FIRMWARE_KABINI; - break; + return FIRMWARE_KABINI; case CHIP_HAWAII: - fw_name = FIRMWARE_HAWAII; - break; + return FIRMWARE_HAWAII; case CHIP_MULLINS: - fw_name = FIRMWARE_MULLINS; - break; + return FIRMWARE_MULLINS; #endif case CHIP_TONGA: - fw_name = FIRMWARE_TONGA; - break; + return FIRMWARE_TONGA; case CHIP_CARRIZO: - fw_name = FIRMWARE_CARRIZO; - break; + return FIRMWARE_CARRIZO; case CHIP_FIJI: - fw_name = FIRMWARE_FIJI; - break; + return FIRMWARE_FIJI; case CHIP_STONEY: - fw_name = FIRMWARE_STONEY; - break; + return FIRMWARE_STONEY; case CHIP_POLARIS10: - fw_name = FIRMWARE_POLARIS10; - break; + return FIRMWARE_POLARIS10; case CHIP_POLARIS11: - fw_name = FIRMWARE_POLARIS11; - break; + return FIRMWARE_POLARIS11; case CHIP_POLARIS12: - fw_name = FIRMWARE_POLARIS12; - break; + return FIRMWARE_POLARIS12; case CHIP_VEGAM: - fw_name = FIRMWARE_VEGAM; - break; + return FIRMWARE_VEGAM; case CHIP_VEGA10: - fw_name = FIRMWARE_VEGA10; - break; + return FIRMWARE_VEGA10; case CHIP_VEGA12: - fw_name = FIRMWARE_VEGA12; - break; + return FIRMWARE_VEGA12; case CHIP_VEGA20: - fw_name = FIRMWARE_VEGA20; - break; + return FIRMWARE_VEGA20; default: - return -EINVAL; + return NULL; } +} + +/** + * amdgpu_vce_early_init() - try to load VCE firmware + * + * @adev: amdgpu_device pointer + * + * Tries to load the VCE firmware. + * + * When not found, returns -ENOENT so that the driver can + * still load and initialize the rest of the IP blocks. + * The GPU can function just fine without VCE; it will just + * not support video encoding.
+ */ +int amdgpu_vce_early_init(struct amdgpu_device *adev) +{ + const char *fw_name = amdgpu_vce_firmware_name(adev); + const struct common_firmware_header *hdr; + unsigned int ucode_version, version_major, version_minor, binary_id; + int r; + + if (!fw_name) + return -ENOENT; r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name); if (r) { - dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n", - fw_name); + dev_err(adev->dev, + "amdgpu_vce: Firmware \"%s\" not found or failed to validate (%d)\n", + fw_name, r); + amdgpu_ucode_release(&adev->vce.fw); - return r; + return -ENOENT; } hdr = (const struct common_firmware_header *)adev->vce.fw->data; @@ -172,11 +189,35 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) version_major = (ucode_version >> 20) & 0xfff; version_minor = (ucode_version >> 8) & 0xfff; binary_id = ucode_version & 0xff; - DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n", + dev_info(adev->dev, "Found VCE firmware Version: %d.%d Binary ID: %d\n", version_major, version_minor, binary_id); adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) | (binary_id << 8)); + return 0; +} + +/** + * amdgpu_vce_sw_init() - allocate memory for VCE BO + * + * @adev: amdgpu_device pointer + * @size: size for the new BO + * + * First step to get VCE online: allocate memory for VCE BO. + * The VCE firmware binary is copied into the VCE BO later, + * in amdgpu_vce_resume. The VCE executes its code from the + * VCE BO and also uses the space in this BO for its stack and data. + * + * Ideally this BO should be placed in VRAM for optimal performance, + * although technically it also runs from system RAM (albeit slowly). + */ +int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) +{ + int i, r; + + if (!adev->vce.fw) + return -ENOENT; + r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, @@ -285,40 +326,23 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) */ int amdgpu_vce_resume(struct amdgpu_device *adev) { - void *cpu_addr; const struct common_firmware_header *hdr; unsigned int offset; - int r, idx; + int idx; if (adev->vce.vcpu_bo == NULL) return -EINVAL; - r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false); - if (r) { - dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r); - return r; - } - - r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->vce.vcpu_bo); - dev_err(adev->dev, "(%d) VCE map failed\n", r); - return r; - } - hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); if (drm_dev_enter(adev_to_drm(adev), &idx)) { - memcpy_toio(cpu_addr, adev->vce.fw->data + offset, + memset_io(adev->vce.cpu_addr, 0, amdgpu_bo_size(adev->vce.vcpu_bo)); + memcpy_toio(adev->vce.cpu_addr, adev->vce.fw->data + offset, adev->vce.fw->size - offset); drm_dev_exit(idx); } - amdgpu_bo_kunmap(adev->vce.vcpu_bo); - - amdgpu_bo_unreserve(adev->vce.vcpu_bo); - return 0; } @@ -427,6 +451,24 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } /** + * amdgpu_vce_required_gart_pages() - gets number of GART pages required by VCE + * + * @adev: amdgpu_device pointer + * + * Returns how many GART pages we need before GTT for the VCE IP block. + * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details. + * For VCE2+, this is not needed so return zero. 
+ */ +u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev) +{ + /* VCE IP block not added yet, so can't use amdgpu_ip_version */ + if (adev->family == AMDGPU_FAMILY_SI) + return 512; + + return 0; +} + +/** * amdgpu_vce_get_create_msg - generate a VCE create msg * * @ring: ring we should submit the msg to diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 6e53f872d084..1c3464ce5037 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -51,14 +51,17 @@ struct amdgpu_vce { struct drm_sched_entity entity; uint32_t srbm_soft_reset; unsigned num_rings; + uint32_t keyselect; }; +int amdgpu_vce_early_init(struct amdgpu_device *adev); int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size); int amdgpu_vce_sw_fini(struct amdgpu_device *adev); int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); +u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib); int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index dc8a17bcc3c8..82624b44e661 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -100,7 +100,8 @@ #define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \ ({ \ - uint32_t internal_reg_offset, addr; \ + /* To avoid a -Wunused-but-set-variable warning. */ \ + uint32_t internal_reg_offset __maybe_unused, addr; \ bool video_range, video1_range, aon_range, aon1_range; \ \ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ @@ -161,7 +162,8 @@ #define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \ ({ \ - uint32_t internal_reg_offset, addr; \ + /* To avoid a -Wunused-but-set-variable warning. 
*/ \ + uint32_t internal_reg_offset __maybe_unused, addr; \ bool video_range, video1_range, aon_range, aon1_range; \ \ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index f96beb96c75c..47a6ce4fdc74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -44,6 +44,18 @@ vf2pf_info->ucode_info[ucode].version = ver; \ } while (0) +#define mmRCC_CONFIG_MEMSIZE 0xde3 + +const char *amdgpu_virt_dynamic_crit_table_name[] = { + "IP DISCOVERY", + "VBIOS IMG", + "RAS TELEMETRY", + "DATA EXCHANGE", + "BAD PAGE INFO", + "INIT HEADER", + "LAST", +}; + bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) { /* By now all MMIO pages except mailbox are blocked */ @@ -150,9 +162,10 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev) virt->ops->req_init_data(adev); if (adev->virt.req_init_data_ver > 0) - DRM_INFO("host supports REQ_INIT_DATA handshake\n"); + dev_info(adev->dev, "host supports REQ_INIT_DATA handshake of critical_region_version %d\n", + adev->virt.req_init_data_ver); else - DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n"); + dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n"); } /** @@ -205,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev) &adev->virt.mm_table.gpu_addr, (void *)&adev->virt.mm_table.cpu_addr); if (r) { - DRM_ERROR("failed to alloc mm table and error = %d.\n", r); + dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r); return r; } memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE); - DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n", + dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n", adev->virt.mm_table.gpu_addr, adev->virt.mm_table.cpu_addr); return 0; @@ -390,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev) if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GPU_PAGE_SIZE, &bo, NULL)) - DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp); + dev_dbg(adev->dev, + "RAS WARN: reserve vram for retired page %llx fail\n", + bp); data->bps_bo[i] = bo; } data->last_reserved = i + 1; @@ -658,10 +673,34 @@ out: schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); } +static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data) +{ + uint32_t dataexchange_offset = + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset; + uint32_t dataexchange_size = + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10; + uint64_t pos = 0; + + dev_info(adev->dev, + "Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n", + dataexchange_offset, dataexchange_size); + + if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) { + dev_err(adev->dev, "Data exchange data not aligned to 4 bytes\n"); + return -EINVAL; + } + + pos = (uint64_t)dataexchange_offset; + amdgpu_device_vram_access(adev, pos, pfvf_data, + dataexchange_size, false); + + return 0; +} + void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) { if (adev->virt.vf2pf_update_interval_ms != 0) { - DRM_INFO("clean up the vf2pf work item\n"); + dev_info(adev->dev, "clean up the vf2pf work item\n"); cancel_delayed_work_sync(&adev->virt.vf2pf_work); adev->virt.vf2pf_update_interval_ms = 0; } @@ -669,13 +708,15 @@ void 
amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) { + uint32_t *pfvf_data = NULL; + adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; adev->virt.vf2pf_update_retry_cnt = 0; if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) { - DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!"); + dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!"); } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) { /* go through this logic in ip_init and reset to init workqueue*/ amdgpu_virt_exchange_data(adev); @@ -684,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); } else if (adev->bios != NULL) { /* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/ - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) { + pfvf_data = + kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10, + GFP_KERNEL); + if (!pfvf_data) { + dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n"); + return; + } - amdgpu_virt_read_pf2vf_data(adev); + if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data)) + goto free_pfvf_data; + + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *)pfvf_data; + + amdgpu_virt_read_pf2vf_data(adev); + +free_pfvf_data: + kfree(pfvf_data); + pfvf_data = NULL; + adev->virt.fw_reserve.p_pf2vf = NULL; + } else { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + } } } @@ -701,23 +765,38 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) { if (adev->mman.fw_vram_usage_va) { - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); - adev->virt.fw_reserve.p_vf2pf = - (struct amd_sriov_msg_vf2pf_info_header *) - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); - adev->virt.fw_reserve.ras_telemetry = - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); + if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.fw_vram_usage_va + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.fw_vram_usage_va + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset + + (AMD_SRIOV_MSG_SIZE_KB << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset); + } else { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 
10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10)); + } } else if (adev->mman.drv_vram_usage_va) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10)); adev->virt.fw_reserve.ras_telemetry = - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10)); } amdgpu_virt_read_pf2vf_data(adev); @@ -816,7 +895,7 @@ static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg) break; default: /* other chip doesn't support SRIOV */ is_sriov = false; - DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type); + dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type); break; } } @@ -838,10 +917,220 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev) RATELIMIT_MSG_ON_RELEASE); mutex_init(&adev->virt.ras.ras_telemetry_mutex); + mutex_init(&adev->virt.access_req_mutex); adev->virt.ras.cper_rptr = 0; } +static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end) +{ + uint32_t sum = 0; + + if (buf_start >= buf_end) + return 0; + + for (; buf_start < buf_end; buf_start++) + sum += buf_start[0]; + + return 0xffffffff - sum; +} + +int amdgpu_virt_init_critical_region(struct amdgpu_device *adev) +{ + struct amd_sriov_msg_init_data_header *init_data_hdr = NULL; + u64 init_hdr_offset = adev->virt.init_data_header.offset; + u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */ + u64 vram_size; + u64 end; + int r = 0; + uint8_t checksum = 0; + + /* Skip below init if critical region version != v2 */ + if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2) + return 0; + + if (init_hdr_offset < 0) { + dev_err(adev->dev, "Invalid init header offset\n"); + return -EINVAL; + } + + vram_size = RREG32(mmRCC_CONFIG_MEMSIZE); + if (!vram_size || vram_size == U32_MAX) + return -EINVAL; + vram_size <<= 20; + + if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) { + dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n"); + return -EINVAL; + } + + /* Allocate for init_data_hdr */ + init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL); + if (!init_data_hdr) + return -ENOMEM; + + amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr, + sizeof(struct amd_sriov_msg_init_data_header), false); + + /* Table validation */ + if (strncmp(init_data_hdr->signature, + AMDGPU_SRIOV_CRIT_DATA_SIGNATURE, + AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) { + dev_err(adev->dev, "Invalid init data signature: %.4s\n", + init_data_hdr->signature); + r = -EINVAL; + goto out; + } + + checksum = amdgpu_virt_crit_region_calc_checksum( + (uint8_t *)&init_data_hdr->initdata_offset, + (uint8_t *)init_data_hdr + + sizeof(struct amd_sriov_msg_init_data_header)); + if (checksum != init_data_hdr->checksum) { + dev_err(adev->dev, "Found unmatching checksum from calculation 0x%x and init_data 0x%x\n", + checksum, init_data_hdr->checksum); + r = -EINVAL; + goto out; + } + + memset(&adev->virt.crit_regn, 0, 
sizeof(adev->virt.crit_regn)); + memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl)); + + adev->virt.crit_regn.offset = init_data_hdr->initdata_offset; + adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb; + + /* Validation and initialization for each table entry */ + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) { + if (!init_data_hdr->ip_discovery_size_in_kb || + init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID], + init_data_hdr->ip_discovery_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset = + init_data_hdr->ip_discovery_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb = + init_data_hdr->ip_discovery_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) { + if (!init_data_hdr->vbios_img_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID], + init_data_hdr->vbios_img_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset = + init_data_hdr->vbios_img_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb = + init_data_hdr->vbios_img_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) { + if (!init_data_hdr->ras_tele_info_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID], + init_data_hdr->ras_tele_info_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset = + init_data_hdr->ras_tele_info_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb = + init_data_hdr->ras_tele_info_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) { + if (!init_data_hdr->dataexchange_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID], + init_data_hdr->dataexchange_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset = + init_data_hdr->dataexchange_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb = + init_data_hdr->dataexchange_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) { + if (!init_data_hdr->bad_page_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID], + init_data_hdr->bad_page_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset = + init_data_hdr->bad_page_info_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb = + init_data_hdr->bad_page_size_in_kb; + } + + /* Validation for critical region info */ + if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) { + dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n", + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb); + r = -EINVAL; + goto out; + } + + /* reserved memory starts from crit region base offset with the size of 5MB */ + 
adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset; + adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10; + dev_info(adev->dev, + "critical region v%d requested to reserve memory start at %08llx with %llu KB.\n", + init_data_hdr->version, + adev->mman.fw_vram_usage_start_offset, + adev->mman.fw_vram_usage_size >> 10); + + adev->virt.is_dynamic_crit_regn_enabled = true; + +out: + kfree(init_data_hdr); + init_data_hdr = NULL; + + return r; +} + +int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev, + int data_id, uint8_t *binary, u32 *size) +{ + uint32_t data_offset = 0; + uint32_t data_size = 0; + enum amd_sriov_msg_table_id_enum data_table_id = data_id; + + if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID) + return -EINVAL; + + data_offset = adev->virt.crit_regn_tbl[data_table_id].offset; + data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10; + + /* Validate on input params */ + if (!binary || !size || *size < (uint64_t)data_size) + return -EINVAL; + + /* Proceed to copy the dynamic content */ + amdgpu_device_vram_access(adev, + (uint64_t)data_offset, (uint32_t *)binary, data_size, false); + *size = (uint64_t)data_size; + + dev_dbg(adev->dev, + "Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n", + amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size); + + return 0; +} + void amdgpu_virt_init(struct amdgpu_device *adev) { bool is_sriov = false; @@ -1289,7 +1578,7 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc case AMDGPU_RAS_BLOCK__MPIO: return RAS_TELEMETRY_GPU_BLOCK_MPIO; default: - DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n", + dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block); return RAS_TELEMETRY_GPU_BLOCK_COUNT; } @@ -1304,7 +1593,7 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return 0; tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL); @@ -1383,7 +1672,7 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return -EINVAL; cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL); @@ -1515,7 +1804,7 @@ static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return 0; tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index d1172c8e58c4..01d5bca2dee1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -54,6 +54,12 @@ #define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2 +/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */ +#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA" +#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4 + +#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) 
((hdr)->valid_tables & (1 << (id))) + enum amdgpu_sriov_vf_mode { SRIOV_VF_MODE_BARE_METAL = 0, SRIOV_VF_MODE_ONE_VF, @@ -144,6 +150,7 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_RAS_CAPS = (1 << 9), AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10), AMDGIM_FEATURE_RAS_CPER = (1 << 11), + AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -262,6 +269,11 @@ struct amdgpu_virt_ras { DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST); +struct amdgpu_virt_region { + uint32_t offset; + uint32_t size_kb; +}; + /* GPU virtualization */ struct amdgpu_virt { uint32_t caps; @@ -289,6 +301,12 @@ struct amdgpu_virt { bool ras_init_done; uint32_t reg_access; + /* dynamic(v2) critical regions */ + struct amdgpu_virt_region init_data_header; + struct amdgpu_virt_region crit_regn; + struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID]; + bool is_dynamic_crit_regn_enabled; + /* vf2pf message */ struct delayed_work vf2pf_work; uint32_t vf2pf_update_interval_ms; @@ -307,6 +325,8 @@ struct amdgpu_virt { /* Spinlock to protect access to the RLCG register interface */ spinlock_t rlcg_reg_lock; + struct mutex access_req_mutex; + union amd_sriov_ras_caps ras_en_caps; union amd_sriov_ras_caps ras_telemetry_en_caps; struct amdgpu_virt_ras ras; @@ -378,6 +398,9 @@ struct amdgpu_video_codec_info; #define amdgpu_sriov_ras_cper_en(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER) +#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \ +((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) @@ -424,6 +447,10 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev); void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); void amdgpu_virt_init(struct amdgpu_device *adev); +int amdgpu_virt_init_critical_region(struct amdgpu_device *adev); +int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev, + int data_id, uint8_t *binary, u32 *size); + bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev); int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c1a801203949..a67285118c37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -484,15 +484,19 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, spin_lock(&vm->status_lock); while (!list_is_head(prev->next, &vm->done)) { bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status); - spin_unlock(&vm->status_lock); bo = bo_va->base.bo; if (bo) { + amdgpu_bo_ref(bo); + spin_unlock(&vm->status_lock); + ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1); + amdgpu_bo_unref(&bo); if (unlikely(ret)) return ret; + + spin_lock(&vm->status_lock); } - spin_lock(&vm->status_lock); prev = prev->next; } spin_unlock(&vm->status_lock); @@ -779,7 +783,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool cleaner_shader_needed = false; bool pasid_mapping_needed = false; struct dma_fence *fence = NULL; - struct amdgpu_fence *af; unsigned int patch; int r; @@ -842,12 +845,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, } if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) { - r = amdgpu_fence_emit(ring, &fence, NULL, 0); + r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0); if (r) return r; - /* this is 
part of the job's context */ - af = container_of(fence, struct amdgpu_fence, base); - af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0; + fence = &job->hw_vm_fence->base; + /* get a ref for the job */ + dma_fence_get(fence); } if (vm_flush_needed) { @@ -1066,7 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params, } /* Prepare a TLB flush fence to be attached to PTs */ - if (!params->unlocked && vm->is_compute_context) { + if (!params->unlocked) { amdgpu_vm_tlb_fence_create(params->adev, vm, fence); /* Makes sure no PD/PT is freed before the flush */ @@ -1952,6 +1955,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; struct amdgpu_vm *vm = bo_va->base.vm; bool valid = true; + int r; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -1972,6 +1976,17 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, return -ENOENT; } + /* It's unlikely that a mapped userq hasn't been idled by the time + * user space issues a GEM unmap IOCTL, unless the unmap is being + * forced from user space. + */ + if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) { + r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr); + if (unlikely(r == -EBUSY)) + dev_warn_once(adev->dev, + "Attempt to unmap an active userq buffer\n"); + } + list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); mapping->bo_va = NULL; @@ -2078,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_bo *bo = before->bo_va->base.bo; amdgpu_vm_it_insert(before, &vm->va); - if (before->flags & AMDGPU_PTE_PRT_FLAG(adev)) + if (before->flags & AMDGPU_VM_PAGE_PRT) amdgpu_vm_prt_get(adev); if (amdgpu_vm_is_bo_always_valid(vm, bo) && @@ -2093,7 +2108,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_bo *bo = after->bo_va->base.bo; amdgpu_vm_it_insert(after, &vm->va); - if (after->flags & AMDGPU_PTE_PRT_FLAG(adev)) + if (after->flags & AMDGPU_VM_PAGE_PRT) amdgpu_vm_prt_get(adev); if (amdgpu_vm_is_bo_always_valid(vm, bo) && @@ -2828,8 +2843,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) */ void amdgpu_vm_manager_init(struct amdgpu_device *adev) { - unsigned i; - /* Concurrent flushes are only possible starting with Vega10 and * are broken on Navi10 and Navi14.
*/ @@ -2838,11 +2851,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) adev->asic_type == CHIP_NAVI14); amdgpu_vmid_mgr_init(adev); - adev->vm_manager.fence_context = - dma_fence_context_alloc(AMDGPU_MAX_RINGS); - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - adev->vm_manager.seqno[i] = 0; - spin_lock_init(&adev->vm_manager.prt_lock); atomic_set(&adev->vm_manager.num_prt_users, 0); @@ -2908,8 +2916,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ - amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0)); - break; + return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0)); case AMDGPU_VM_OP_UNRESERVE_VMID: amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0)); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index cf0ec94e8a07..15d757c016cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -453,10 +453,6 @@ struct amdgpu_vm_manager { unsigned int first_kfd_vmid; bool concurrent_flush; - /* Handling of VM fences */ - u64 fence_context; - unsigned seqno[AMDGPU_MAX_RINGS]; - uint64_t max_pfn; uint32_t num_level; uint32_t block_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 1ede308a7c67..aad530c46a9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -298,6 +298,9 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num) { int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 }; + if (adev->gmc.xgmi.num_physical_nodes <= 1) + return -EINVAL; + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { case IP_VERSION(6, 4, 0): case IP_VERSION(6, 4, 1): @@ -333,6 +336,10 @@ static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link } i = global_link_num / n; + + if (!(adev->aid_mask & BIT(i))) + return U32_MAX; + addr += adev->asic_funcs->encode_ext_smn_addressing(i); return RREG32_PCIE_EXT(addr); @@ -342,6 +349,9 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num) { u32 xgmi_state_reg_val; + if (adev->gmc.xgmi.num_physical_nodes <= 1) + return -EINVAL; + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { case IP_VERSION(6, 4, 0): case IP_VERSION(6, 4, 1): @@ -958,28 +968,6 @@ static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf return 0; } -static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev, - struct amdgpu_device *peer_adev) -{ - struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info; - struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info; - - for (int i = 0; i < peer_info->num_nodes; i++) { - if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) { - for (int j = 0; j < top_info->num_nodes; j++) { - if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) { - peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops; - peer_info->nodes[i].is_sharing_enabled = - top_info->nodes[j].is_sharing_enabled; - peer_info->nodes[i].num_links = - top_info->nodes[j].num_links; - return; - } - } - } - } -} - int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { struct psp_xgmi_topology_info *top_info; @@ -1065,11 +1053,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) /* To do: continue with some node failed or disable the whole hive*/ goto exit_unlock; } - - /* fill 
the topology info for peers instead of getting from PSP */ - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - amdgpu_xgmi_fill_topology_info(adev, tmp_adev); - } } else { /* get latest topology info for each device from psp */ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 3a79ed7d8031..3cdb1e0eca37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -23,26 +23,84 @@ #ifndef AMDGV_SRIOV_MSG__H_ #define AMDGV_SRIOV_MSG__H_ -/* unit in kilobytes */ -#define AMD_SRIOV_MSG_VBIOS_OFFSET 0 -#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64 -#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB -#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4 -#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 -#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2 -#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64 +#define AMD_SRIOV_MSG_SIZE_KB 1 + /* - * layout + * layout v1 * 0 64KB 65KB 66KB 68KB 132KB * | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... * | 64KB | 1KB | 1KB | 2KB | 64KB | ... */ -#define AMD_SRIOV_MSG_SIZE_KB 1 -#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB -#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) -#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) -#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB) +/* + * layout v2 (offsets are dynamically allocated and the offsets below are examples) + * 0 1KB 64KB 65KB 66KB 68KB 132KB + * | INITD_H | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... + * | 1KB | 64KB | 1KB | 1KB | 2KB | 64KB | ... 
+ * + * Note: PF2VF + VF2PF + Bad Page = DataExchange region (allocated contiguously) + */ + +/* v1 layout sizes */ +#define AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 64 +#define AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 1 +#define AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 1 +#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1 2 +#define AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 64 +#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 \ + (AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 + AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 + \ + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1) + +/* v1 offsets */ +#define AMD_SRIOV_MSG_VBIOS_OFFSET_V1 0 +#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 +#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 +#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 +#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1) +#define AMD_SRIOV_MSG_INIT_DATA_TOT_SIZE_KB_V1 \ + (AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 + AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 + \ + AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1) + +enum amd_sriov_crit_region_version { + GPU_CRIT_REGION_V1 = 1, + GPU_CRIT_REGION_V2 = 2, +}; + +/* v2 layout offset enum (in order of allocation) */ +enum amd_sriov_msg_table_id_enum { + AMD_SRIOV_MSG_IPD_TABLE_ID = 0, + AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, + AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID, + AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID, + AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID, + AMD_SRIOV_MSG_INITD_H_TABLE_ID, + AMD_SRIOV_MSG_MAX_TABLE_ID, +}; + +struct amd_sriov_msg_init_data_header { + char signature[4]; /* "INDA" */ + uint32_t version; + uint32_t checksum; + uint32_t initdata_offset; /* 0 */ + uint32_t initdata_size_in_kb; /* 5MB */ + uint32_t valid_tables; + uint32_t vbios_img_offset; + uint32_t vbios_img_size_in_kb; + uint32_t dataexchange_offset; + uint32_t dataexchange_size_in_kb; + uint32_t ras_tele_info_offset; + uint32_t ras_tele_info_size_in_kb; + uint32_t ip_discovery_offset; + uint32_t ip_discovery_size_in_kb; + uint32_t bad_page_info_offset; + uint32_t bad_page_size_in_kb; + uint32_t reserved[8]; +}; /* * PF2VF history log: @@ -102,7 +160,8 @@ union amd_sriov_msg_feature_flags { uint32_t ras_caps : 1; uint32_t ras_telemetry : 1; uint32_t ras_cper : 1; - uint32_t reserved : 20; + uint32_t xgmi_ta_ext_peer_link : 1; + uint32_t reserved : 19; } flags; uint32_t all; }; @@ -140,8 +199,9 @@ union amd_sriov_ras_caps { uint64_t block_jpeg : 1; uint64_t block_ih : 1; uint64_t block_mpio : 1; + uint64_t block_mmsch : 1; uint64_t poison_propogation_mode : 1; - uint64_t reserved : 44; + uint64_t reserved : 43; } bits; uint64_t all; }; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 811124ff88a8..f9e2edf5260b 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -407,7 +407,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, return -EINVAL; } - if (adev->kfd.init_complete && !amdgpu_in_reset(adev)) + if (adev->kfd.init_complete && !amdgpu_in_reset(adev) && + !adev->in_suspend) flags |= AMDGPU_XCP_OPS_KFD; if (flags & AMDGPU_XCP_OPS_KFD) { diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 41f4705bdbbd..876a3256dba4 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -156,6 +156,9 @@ static int cik_ih_irq_init(struct amdgpu_device *adev) /* enable irqs */ cik_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -192,6 +195,9 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; /* When a ring buffer overflow happen start parsing interrupt @@ -211,6 +217,8 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev, tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(mmIH_RB_CNTL, tmp); } + +out: return (wptr & ih->ptr_mask); } @@ -306,6 +314,10 @@ static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + r = amdgpu_irq_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 2f891fb846d5..bc7a2e06ab5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -157,6 +157,9 @@ static int cz_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ cz_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -194,6 +197,9 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -297,6 +303,10 @@ static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + r = amdgpu_irq_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 8841d7213de4..d75b9940f248 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4956,7 +4956,8 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); - if (!amdgpu_sriov_vf(adev)) { + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } @@ -9951,6 +9952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush, }; static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d61eb9f187c6..8a2ee2de390f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1821,13 +1821,15 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 0, 3): if ((adev->gfx.me_fw_version >= 2280) && (adev->gfx.mec_fw_version >= 2410) && - !amdgpu_sriov_vf(adev)) { + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.gfx_supported_reset |= 
AMDGPU_RESET_TYPE_PER_QUEUE; } break; default: - if (!amdgpu_sriov_vf(adev)) { + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } @@ -2438,7 +2440,7 @@ static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) if (version_minor == 3) gfx_v11_0_load_rlcp_rlcv_microcode(adev); } - + return 0; } @@ -3886,7 +3888,7 @@ static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) } memcpy(fw, fw_data, fw_size); - + amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); @@ -5872,9 +5874,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (flags & AMDGPU_IB_PREEMPTED) control |= INDIRECT_BUFFER_PRE_RESUME(1); - if (vmid) + if (vmid && !ring->adev->gfx.rs64_enable) gfx_v11_0_ring_emit_de_meta(ring, - (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); + !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED)); } amdgpu_ring_write(ring, header); @@ -7318,6 +7320,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, }; static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 93fde0f9af87..d01d2712cf57 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -1548,7 +1548,8 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(12, 0, 1): if ((adev->gfx.me_fw_version >= 2660) && (adev->gfx.mec_fw_version >= 2920) && - !amdgpu_sriov_vf(adev)) { + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } @@ -5595,6 +5596,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, }; static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 7693b7953426..80565392313f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3102,6 +3102,11 @@ static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block) return r; } + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 5976ed55d9db..2b7aba22ecc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4399,6 +4399,11 @@ static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block) gfx_v7_0_gpu_early_init(adev); + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + return r; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 0856ff65288c..1c87375e1dd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2023,6 +2023,11 @@ static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + return 0; } @@ -6939,6 +6944,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { .pad_ib = amdgpu_ring_generic_pad_ib, .emit_rreg = gfx_v8_0_ring_emit_rreg, .emit_wreg = gfx_v8_0_ring_emit_wreg, + .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index dd19a97436db..0148d7ff34d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2409,7 +2409,7 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset) adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); @@ -7586,6 +7586,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, }; static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 77f9d5b9a556..cbb74ffc4792 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1149,14 +1149,16 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): if ((adev->gfx.mec_fw_version >= 155) && - !amdgpu_sriov_vf(adev)) { + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } break; case IP_VERSION(9, 5, 0): if ((adev->gfx.mec_fw_version >= 21) && - !amdgpu_sriov_vf(adev)) { + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } @@ -2152,7 +2154,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) return 0; } -static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore) +static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, + bool restore) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -2186,8 +2189,6 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, b atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } - - return 0; } static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id) @@ -2220,7 +2221,7 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct 
amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring; - int i, r; + int i; gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); @@ -2228,9 +2229,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); - if (r) - return r; + gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); } return amdgpu_gfx_enable_kcq(adev, xcc_id); @@ -2292,7 +2291,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode); } else { - if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, + if (adev->in_suspend) + amdgpu_xcp_restore_partition_mode(adev->xcp_mgr); + else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_NONE) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) r = amdgpu_xcp_switch_partition_mode( @@ -3605,11 +3606,8 @@ pipe_reset: return r; } - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); - if (r) { - dev_err(adev->dev, "fail to init kcq\n"); - return r; - } + gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); + spin_lock_irqsave(&kiq->ring_lock, flags); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); if (r) { @@ -4798,6 +4796,7 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { .emit_wreg = gfx_v9_4_3_ring_emit_wreg, .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, }; static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index d7499be8c4bf..ce6e04242c52 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -103,8 +103,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; - bool retry_fault = !!(entry->src_data[1] & 0x80); - bool write_fault = !!(entry->src_data[1] & 0x20); + bool retry_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); + bool write_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); struct amdgpu_task_info *task_info; uint32_t status = 0; u64 addr; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 7bc389d9f5c4..ba59ee8e398a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -103,12 +103,41 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ? 
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; + bool retry_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); + bool write_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); uint32_t status = 0; u64 addr; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; + if (retry_fault) { + /* Returning 1 here also prevents sending the IV to the KFD */ + + /* Process it only if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, + entry->timestamp)) + return 1; + + /* Delegate it to a different ring if the hardware hasn't + * already done it. + */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, + entry->timestamp, write_fault)) + return 1; + } + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index f4a19357ccbc..7a9d6894e321 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -91,6 +91,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { struct amdgpu_vmhub *hub; + bool retry_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); + bool write_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); uint32_t status = 0; u64 addr; @@ -102,6 +106,31 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, else hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + if (retry_fault) { + /* Returning 1 here also prevents sending the IV to the KFD */ + + /* Process it only if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, + entry->timestamp)) + return 1; + + /* Delegate it to a different ring if the hardware hasn't + * already done it. 
+ */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, + entry->timestamp, write_fault)) + return 1; + } + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to @@ -312,9 +341,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, return; } - mutex_lock(&adev->mman.gtt_window_lock); gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0); - mutex_unlock(&adev->mman.gtt_window_lock); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index f6ad7911f1e6..a8ec95f42926 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -213,7 +213,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, mc, base); - amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); + amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW); } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) @@ -610,23 +610,21 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) } static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, - u32 status, u32 addr, u32 mc_client) + u32 status, u32 addr) { u32 mc_id; u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, PROTECTIONS); - char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, - (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, MEMORY_CLIENT_ID); - dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", + dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from %d\n", protections, vmid, addr, REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, MEMORY_CLIENT_RW) ? 
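/*
 * Editorial sketch, not part of the patch: the retry-fault handling that the
 * hunks above add to gmc_v11_0 and gmc_v12_0 mirrors the pre-existing
 * gmc_v10_0 path. The wrapper name below is illustrative only; every helper
 * it calls is taken verbatim from the added lines, so this is just the same
 * control flow collected in one place for readers of the diff.
 */
static int gmc_retry_fault_sketch(struct amdgpu_device *adev,
				  struct amdgpu_iv_entry *entry,
				  u64 addr, bool write_fault)
{
	/* 1) Drop duplicate faults for the same address/pasid. */
	if (entry->ih != &adev->irq.ih_soft &&
	    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
				     entry->timestamp))
		return 1;

	/* 2) Re-queue the IV on the soft ring so the hard IRQ path stays short. */
	if (entry->ih == &adev->irq.ih) {
		amdgpu_irq_delegate(adev, entry, 8);
		return 1;
	}

	/* 3) Try to service the recoverable fault by filling page tables. */
	if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
				   entry->timestamp, write_fault))
		return 1;

	return 0; /* fall through to the usual fault-report path */
}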
- "write" : "read", block, mc_client, mc_id); + "write" : "read", mc_id); } static const u32 mc_cg_registers[] = { @@ -1072,6 +1070,12 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, { u32 addr, status; + /* Delegate to the soft IRQ handler ring */ + if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) { + amdgpu_irq_delegate(adev, entry, 4); + return 1; + } + addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); @@ -1079,6 +1083,10 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, if (!addr && !status) return 0; + amdgpu_vm_update_fault_cache(adev, entry->pasid, + ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, + status, AMDGPU_GFXHUB(0)); + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST) gmc_v6_0_set_fault_enable_default(adev, false); @@ -1089,7 +1097,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, addr); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); - gmc_v6_0_vm_decode_fault(adev, status, addr, 0); + gmc_v6_0_vm_decode_fault(adev, status, addr); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0e5e54d0a9a5..fbd0bf147f50 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1261,6 +1261,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, { u32 addr, status, mc_client, vmid; + /* Delegate to the soft IRQ handler ring */ + if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) { + amdgpu_irq_delegate(adev, entry, 4); + return 1; + } + addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index e1509480dfc2..6551b60f2584 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1439,6 +1439,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, return 0; } + /* Delegate to the soft IRQ handler ring */ + if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) { + amdgpu_irq_delegate(adev, entry, 4); + return 1; + } + addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0d1dd587db5f..8ad7519f7b58 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -544,8 +544,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - bool retry_fault = !!(entry->src_data[1] & 0x80); - bool write_fault = !!(entry->src_data[1] & 0x20); + bool retry_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); + bool write_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); uint32_t status = 0, cid = 0, rw = 0, fed = 0; struct amdgpu_task_info *task_info; struct amdgpu_vmhub *hub; @@ -1843,6 +1845,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) && + 
adev->rev_id == 0x3) + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; + if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { vram_info = RREG32(regBIF_BIOS_SCRATCH_4); adev->gmc.vram_vendor = vram_info & 0xF; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 1317ede131b6..01cadf898c00 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -157,6 +157,9 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ iceland_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -194,6 +197,9 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -296,6 +302,10 @@ static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + r = amdgpu_irq_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c index baf097d2e1ac..ab0bf880d3d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c @@ -878,6 +878,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = { .get_rptr = jpeg_v5_0_1_dec_ring_get_rptr, .get_wptr = jpeg_v5_0_1_dec_ring_get_wptr, .set_wptr = jpeg_v5_0_1_dec_ring_set_wptr, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index 1cd9eaeef38f..64cae89357b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -205,10 +205,11 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev, int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev); struct mes_detect_and_reset_queue_input input; struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; unsigned int hung_db_num = 0; - int queue_id, r, i; + unsigned long queue_id; u32 db_array[8]; + bool found_hung_queue = false; + int r, i; if (db_array_size > 8) { dev_err(adev->dev, "DB array size (%d vs 8) too small\n", @@ -227,22 +228,26 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev, if (r) { dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r); } else if (hung_db_num) { - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (queue->queue_type == queue_type) { - for (i = 0; i < hung_db_num; i++) { - if (queue->doorbell_index == db_array[i]) { - queue->state = AMDGPU_USERQ_STATE_HUNG; - atomic_inc(&adev->gpu_reset_counter); - amdgpu_userq_fence_driver_force_completion(queue); - drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL); - } + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + if (queue->queue_type == queue_type) { + for (i = 0; i < hung_db_num; i++) { + if (queue->doorbell_index == db_array[i]) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + found_hung_queue = true; + atomic_inc(&adev->gpu_reset_counter); + amdgpu_userq_fence_driver_force_completion(queue); + drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL); } } } } } + if 
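/*
 * Editorial sketch, not part of the patch: the iceland_ih hunks above, and the
 * matching si_ih and tonga_ih hunks further down, all gain the same "soft" IH
 * ring. The three fragments below condense that pattern; the function calls
 * and fields are the ones added by the diff, the surrounding skeleton is
 * illustrative only.
 */
/* sw_init: allocate a software ring alongside the hardware ring */
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
	if (r)
		return r;

/* irq_init: mark the soft ring usable once interrupts are enabled */
	if (adev->irq.ih_soft.ring_size)
		adev->irq.ih_soft.enabled = true;

/* get_wptr: the soft ring has no hardware WPTR register, so skip the
 * overflow handling that only applies to the real ring */
	if (ih == &adev->irq.ih_soft)
		goto out;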
(found_hung_queue) { + /* Resume scheduling after hang recovery */ + r = amdgpu_mes_resume(adev); + } + return r; } @@ -254,7 +259,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type]; struct drm_amdgpu_userq_in *mqd_user = args_in; struct amdgpu_mqd_prop *userq_props; - struct amdgpu_gfx_shadow_info shadow_info; int r; /* Structure to initialize MQD for userqueue using generic MQD init function */ @@ -280,8 +284,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->doorbell_index = queue->doorbell_index; userq_props->fence_address = queue->fence_drv->gpu_addr; - if (adev->gfx.funcs->get_gfx_shadow_info) - adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) { struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd; @@ -298,8 +300,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, goto free_mqd; } - if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va, - max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE))) + r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va, + 2048); + if (r) goto free_mqd; userq_props->eop_gpu_addr = compute_mqd->eop_va; @@ -311,6 +314,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, kfree(compute_mqd); } else if (queue->queue_type == AMDGPU_HW_IP_GFX) { struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11; + struct amdgpu_gfx_shadow_info shadow_info; + + if (adev->gfx.funcs->get_gfx_shadow_info) { + adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); + } else { + r = -EINVAL; + goto free_mqd; + } if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) { DRM_ERROR("Invalid GFX MQD\n"); @@ -330,8 +341,13 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->tmz_queue = mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; - if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va, - shadow_info.shadow_size)) + r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va, + shadow_info.shadow_size); + if (r) + goto free_mqd; + r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va, + shadow_info.csa_size); + if (r) goto free_mqd; kfree(mqd_gfx_v11); @@ -350,9 +366,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, r = -ENOMEM; goto free_mqd; } - - if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va, - shadow_info.csa_size)) + r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va, + 32); + if (r) goto free_mqd; userq_props->csa_addr = mqd_sdma_v11->csa_va; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index da575bb1377f..3a52754b5cad 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -369,6 +369,7 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes, struct mes_remove_queue_input *input) { union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; + uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK; memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); @@ -379,6 +380,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes, mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; + if (mes_rev >= 0x60) + mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset; + return mes_v11_0_submit_pkt_and_poll_completion(mes, 
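/*
 * Editorial sketch, not part of the patch: the mes_userq_detect_and_reset()
 * hunk above replaces the userq_mgr_list/idr walk with an xarray keyed by
 * doorbell. A stripped-down view of that iteration idiom; queue_id must be
 * unsigned long for xa_for_each(), and the hung-queue bookkeeping from the
 * real hunk is elided here.
 */
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		if (queue->queue_type != queue_type)
			continue;
		/* compare queue->doorbell_index against the hung-doorbell array */
	}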
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt), offsetof(union MESAPI__REMOVE_QUEUE, api_status)); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 7f3512d9de07..744e95d3984a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -361,6 +361,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes, struct mes_remove_queue_input *input) { union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; + uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK; memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); @@ -371,6 +372,9 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes, mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; + if (mes_rev >= 0x5a) + mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset; + return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt), diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index e5282a5d05d9..e7cd07383d56 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -173,13 +173,17 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev, enum idh_request req, u32 data1, u32 data2, u32 data3) { - int r, retry = 1; + struct amdgpu_virt *virt = &adev->virt; + int r = 0, retry = 1; enum idh_event event = -1; + mutex_lock(&virt->access_req_mutex); send_request: - if (amdgpu_ras_is_rma(adev)) - return -ENODEV; + if (amdgpu_ras_is_rma(adev)) { + r = -ENODEV; + goto out; + } xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3); @@ -217,17 +221,25 @@ send_request: if (req != IDH_REQ_GPU_INIT_DATA) { dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r); - return r; + goto out; } else /* host doesn't support REQ_GPU_INIT_DATA handshake */ adev->virt.req_init_data_ver = 0; } else { if (req == IDH_REQ_GPU_INIT_DATA) { - adev->virt.req_init_data_ver = - RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1); - - /* assume V1 in case host doesn't set version number */ - if (adev->virt.req_init_data_ver < 1) - adev->virt.req_init_data_ver = 1; + switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) { + case GPU_CRIT_REGION_V2: + adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2; + adev->virt.init_data_header.offset = + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2); + adev->virt.init_data_header.size_kb = + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3); + break; + default: + adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1; + adev->virt.init_data_header.offset = -1; + adev->virt.init_data_header.size_kb = 0; + break; + } } } @@ -238,7 +250,10 @@ send_request: } } - return 0; +out: + mutex_unlock(&virt->access_req_mutex); + + return r; } static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, @@ -285,7 +300,8 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, static int xgpu_nv_request_init_data(struct amdgpu_device *adev) { - return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); + return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA, + 0, GPU_CRIT_REGION_V2, 0); } static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 
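/*
 * Editorial sketch, not part of the patch: both MES hunks above gate the new
 * remove_queue_after_reset field on the scheduler firmware revision, which is
 * carried in the low bits of mes->sched_version. The helper name below is
 * illustrative; the mask and the thresholds (0x60 for MES 11.0, 0x5a for
 * MES 12.0) are the ones used in the diff.
 */
static bool mes_fw_supports_remove_after_reset(struct amdgpu_mes *mes,
					       uint32_t min_rev)
{
	uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;

	return mes_rev >= min_rev;
}

/* e.g. mes_fw_supports_remove_after_reset(mes, 0x60) in mes_v11_0,
 *      mes_fw_supports_remove_after_reset(mes, 0x5a) in mes_v12_0 */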
1c22bc11c1f8..bdfd2917e3ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -41,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev) { - u32 tmp; - - tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0)); - /* If it is VF or subrevision holds a non-zero value, that should be used */ - if (tmp || amdgpu_sriov_vf(adev)) - return tmp; + u32 rev_id; - /* If discovery subrev is not updated, use register version */ - tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); - tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, - STRAP_ATI_REV_ID_DEV0_F0); + /* + * fetch the sub-revision field from the IP-discovery table + * (returns zero if the table entry is not populated). + */ + if (amdgpu_sriov_vf(adev)) { + rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0)); + } else { + rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); + rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, + STRAP_ATI_REV_ID_DEV0_F0); + } - return tmp; + return rev_id; } static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 64b240b51f1a..a9be7a505026 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -142,13 +142,37 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) return err; } -static int psp_v11_0_wait_for_bootloader(struct psp_context *psp) +static int psp_v11_wait_for_tos_unload(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; + uint32_t sol_reg1, sol_reg2; + int retry_loop; + /* Wait for the TOS to be unloaded */ + for (retry_loop = 0; retry_loop < 20; retry_loop++) { + sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + usleep_range(1000, 2000); + sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + if (sol_reg1 == sol_reg2) + return 0; + } + dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x", + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33), + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81)); + + return -ETIME; +} + +static int psp_v11_0_wait_for_bootloader(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; int ret; int retry_loop; + /* For a reset done at the end of S3, only wait for TOS to be unloaded */ + if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev)) + return psp_v11_wait_for_tos_unload(psp); + for (retry_loop = 0; retry_loop < 20; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 36b1ca73c2ed..a1443990d5c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -2361,11 +2361,15 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): - if ((adev->gfx.mec_fw_version >= 0xb0) && amdgpu_dpm_reset_sdma_is_supported(adev)) + if ((adev->gfx.mec_fw_version >= 0xb0) && + amdgpu_dpm_reset_sdma_is_supported(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; case IP_VERSION(9, 5, 0): - if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev)) + if 
((adev->gfx.mec_fw_version >= 0xf) && + amdgpu_dpm_reset_sdma_is_supported(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 7dc67a22a7a0..8ddc4df06a1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1429,7 +1429,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 0, 2): case IP_VERSION(5, 0, 5): if ((adev->sdma.instance[0].fw_version >= 35) && - !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 3bd44c24f692..51101b0aa2fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -342,7 +342,7 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->me > 1) { - amdgpu_asic_flush_hdp(adev, ring); + amdgpu_hdp_flush(adev, ring); } else { ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; @@ -1348,12 +1348,14 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 2, 3): case IP_VERSION(5, 2, 4): if ((adev->sdma.instance[0].fw_version >= 76) && - !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; case IP_VERSION(5, 2, 5): if ((adev->sdma.instance[0].fw_version >= 34) && - !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index db6e41967f12..217040044987 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1356,7 +1356,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): if ((adev->sdma.instance[0].fw_version >= 21) && - !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1389,7 +1390,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; case IP_VERSION(6, 0, 3): - if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq) + if (adev->sdma.instance[0].fw_version >= 29 && !adev->sdma.disable_uq) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; case IP_VERSION(6, 1, 0): diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index 326ecc8d37d2..2b81344dcd66 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -1337,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) adev->sdma.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_sdma_sysfs_reset_mask_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c 
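/*
 * Editorial sketch, not part of the patch: every SDMA variant touched above
 * adds the same extra condition before advertising per-queue reset -- the new
 * debug flag adev->debug_disable_gpu_ring_reset must be clear. The
 * version-specific firmware and SR-IOV checks differ per IP and are folded
 * into one illustrative predicate here.
 */
	if (sdma_fw_new_enough && !amdgpu_sriov_vf(adev) &&
	    !adev->debug_disable_gpu_ring_reset)
		adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;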
b/drivers/gpu/drm/amd/amdgpu/si.c index e0f139de7991..f7288372ee61 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -45,6 +45,7 @@ #include "dce_v6_0.h" #include "si.h" #include "uvd_v3_1.h" +#include "vce_v1_0.h" #include "uvd/uvd_4_0_d.h" @@ -921,8 +922,6 @@ static const u32 hainan_mgcg_cgcg_init[] = 0x3630, 0xfffffff0, 0x00000100, }; -/* XXX: update when we support VCE */ -#if 0 /* tahiti, pitcairn, verde */ static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] = { @@ -940,13 +939,7 @@ static const struct amdgpu_video_codecs tahiti_video_codecs_encode = .codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array), .codec_array = tahiti_video_codecs_encode_array, }; -#else -static const struct amdgpu_video_codecs tahiti_video_codecs_encode = -{ - .codec_count = 0, - .codec_array = NULL, -}; -#endif + /* oland and hainan don't support encode */ static const struct amdgpu_video_codecs hainan_video_codecs_encode = { @@ -1925,6 +1918,14 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) ~VCEPLL_BYPASS_EN_MASK); if (!evclk || !ecclk) { + /* + * On some chips, the PLL takes way too long to get out of + * sleep mode, causing a timeout waiting on CTLACK/CTLACK2. + * Leave the PLL running in bypass mode. + */ + if (adev->pdev->device == 0x6780) + return 0; + /* Keep the Bypass mode, put PLL to sleep */ WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK, ~VCEPLL_SLEEP_MASK); @@ -2717,7 +2718,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev) else amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); - /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ + amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); break; case CHIP_OLAND: amdgpu_device_ip_block_add(adev, &si_common_ip_block); @@ -2735,7 +2736,6 @@ int si_set_ip_blocks(struct amdgpu_device *adev) else amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); - /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ break; case CHIP_HAINAN: amdgpu_device_ip_block_add(adev, &si_common_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 1df00f8a2406..66f650f87243 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -96,6 +96,9 @@ static int si_ih_irq_init(struct amdgpu_device *adev) pci_set_master(adev->pdev); si_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -112,6 +115,9 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", @@ -127,6 +133,8 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev, tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(IH_RB_CNTL, tmp); } + +out: return (wptr & ih->ptr_mask); } @@ -175,6 +183,10 @@ static int si_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + return amdgpu_irq_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h index cbd4f8951cfa..561462a8332e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sid.h +++ b/drivers/gpu/drm/amd/amdgpu/sid.h @@ -582,45 
+582,6 @@ #define DMA_PACKET_NOP 0xf /* VCE */ -#define VCE_STATUS 0x20004 -#define VCE_VCPU_CNTL 0x20014 -#define VCE_CLK_EN (1 << 0) -#define VCE_VCPU_CACHE_OFFSET0 0x20024 -#define VCE_VCPU_CACHE_SIZE0 0x20028 -#define VCE_VCPU_CACHE_OFFSET1 0x2002c -#define VCE_VCPU_CACHE_SIZE1 0x20030 -#define VCE_VCPU_CACHE_OFFSET2 0x20034 -#define VCE_VCPU_CACHE_SIZE2 0x20038 -#define VCE_SOFT_RESET 0x20120 -#define VCE_ECPU_SOFT_RESET (1 << 0) -#define VCE_FME_SOFT_RESET (1 << 2) -#define VCE_RB_BASE_LO2 0x2016c -#define VCE_RB_BASE_HI2 0x20170 -#define VCE_RB_SIZE2 0x20174 -#define VCE_RB_RPTR2 0x20178 -#define VCE_RB_WPTR2 0x2017c -#define VCE_RB_BASE_LO 0x20180 -#define VCE_RB_BASE_HI 0x20184 -#define VCE_RB_SIZE 0x20188 -#define VCE_RB_RPTR 0x2018c -#define VCE_RB_WPTR 0x20190 -#define VCE_CLOCK_GATING_A 0x202f8 -#define VCE_CLOCK_GATING_B 0x202fc -#define VCE_UENC_CLOCK_GATING 0x205bc -#define VCE_UENC_REG_CLOCK_GATING 0x205c0 -#define VCE_FW_REG_STATUS 0x20e10 -# define VCE_FW_REG_STATUS_BUSY (1 << 0) -# define VCE_FW_REG_STATUS_PASS (1 << 3) -# define VCE_FW_REG_STATUS_DONE (1 << 11) -#define VCE_LMI_FW_START_KEYSEL 0x20e18 -#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20 -#define VCE_LMI_CTRL2 0x20e74 -#define VCE_LMI_CTRL 0x20e98 -#define VCE_LMI_VM_CTRL 0x20ea0 -#define VCE_LMI_SWAP_CNTL 0x20eb4 -#define VCE_LMI_SWAP_CNTL1 0x20eb8 -#define VCE_LMI_CACHE_CTRL 0x20ef4 - #define VCE_CMD_NO_OP 0x00000000 #define VCE_CMD_END 0x00000001 #define VCE_CMD_IB 0x00000002 @@ -629,7 +590,6 @@ #define VCE_CMD_IB_AUTO 0x00000005 #define VCE_CMD_SEMAPHORE 0x00000006 - //#dce stupp /* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */ #define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c) //(0x6df0 - 0x6df0)/4 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 9785fada4fa7..42f5d9c0e3af 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -853,10 +853,6 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev) { u32 sol_reg; - /* CP hangs in IGT reloading test on RN, reset to WA */ - if (adev->asic_type == CHIP_RENOIR) - return true; - if (amdgpu_gmc_need_reset_on_init(adev)) return true; if (amdgpu_psp_tos_reload_needed(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 7d17ae56f901..ee8038df17e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -159,6 +159,9 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ tonga_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -196,6 +199,9 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -306,6 +312,10 @@ static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 8dc32787d625..0f5b1719fda5 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -711,6 +711,19 @@ static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev, return die; } +static void 
umc_v12_0_mca_ipid_parse(struct amdgpu_device *adev, uint64_t ipid, + uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid) +{ + if (did) + *did = MCA_IPID_2_DIE_ID(ipid); + if (ch) + *ch = MCA_IPID_2_UMC_CH(ipid); + if (umc_inst) + *umc_inst = MCA_IPID_2_UMC_INST(ipid); + if (sid) + *sid = MCA_IPID_2_SOCKET_ID(ipid); +} + struct amdgpu_umc_ras umc_v12_0_ras = { .ras_block = { .hw_ops = &umc_v12_0_ras_hw_ops, @@ -724,5 +737,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = { .convert_ras_err_addr = umc_v12_0_convert_error_address, .get_die_id_from_pa = umc_v12_0_get_die_id, .get_retire_flip_bits = umc_v12_0_get_retire_flip_bits, + .mca_ipid_parse = umc_v12_0_mca_ipid_parse, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c new file mode 100644 index 000000000000..9ae424618556 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c @@ -0,0 +1,839 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * Copyright 2025 Valve Corporation + * Copyright 2025 Alexandre Demers + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * Authors: Christian König <christian.koenig@amd.com> + * Timur Kristóf <timur.kristof@gmail.com> + * Alexandre Demers <alexandre.f.demers@gmail.com> + */ + +#include <linux/firmware.h> + +#include "amdgpu.h" +#include "amdgpu_vce.h" +#include "amdgpu_gart.h" +#include "sid.h" +#include "vce_v1_0.h" +#include "vce/vce_1_0_d.h" +#include "vce/vce_1_0_sh_mask.h" +#include "oss/oss_1_0_d.h" +#include "oss/oss_1_0_sh_mask.h" + +#define VCE_V1_0_FW_SIZE (256 * 1024) +#define VCE_V1_0_STACK_SIZE (64 * 1024) +#define VCE_V1_0_DATA_SIZE (7808 * (AMDGPU_MAX_VCE_HANDLES + 1)) +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 + +#define VCE_V1_0_GART_PAGE_START \ + (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS) +#define VCE_V1_0_GART_ADDR_START \ + (VCE_V1_0_GART_PAGE_START * AMDGPU_GPU_PAGE_SIZE) + +static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev); +static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev); + +struct vce_v1_0_fw_signature { + int32_t offset; + uint32_t length; + int32_t number; + struct { + uint32_t chip_id; + uint32_t keyselect; + uint32_t nonce[4]; + uint32_t sigval[4]; + } val[8]; +}; + +/** + * vce_v1_0_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t vce_v1_0_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->me == 0) + return RREG32(mmVCE_RB_RPTR); + else + return RREG32(mmVCE_RB_RPTR2); +} + +/** + * vce_v1_0_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t vce_v1_0_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->me == 0) + return RREG32(mmVCE_RB_WPTR); + else + return RREG32(mmVCE_RB_WPTR2); +} + +/** + * vce_v1_0_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void vce_v1_0_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->me == 0) + WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); + else + WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); +} + +static int vce_v1_0_lmi_clean(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + if (RREG32(mmVCE_LMI_STATUS) & 0x337f) + return 0; + + mdelay(10); + } + } + + return -ETIMEDOUT; +} + +static int vce_v1_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + if (RREG32(mmVCE_STATUS) & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + dev_err(adev->dev, "VCE not responding, trying to reset the ECPU\n"); + + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + +static void vce_v1_0_init_cg(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0x1e; + tmp &= ~0xe100e1; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0xff9ff000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3ff; + 
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); +} + +/** + * vce_v1_0_load_fw_signature - load firmware signature into VCPU BO + * + * @adev: amdgpu_device pointer + * + * The VCE1 firmware validation mechanism needs a firmware signature. + * This function finds the signature appropriate for the current + * ASIC and writes that into the VCPU BO. + */ +static int vce_v1_0_load_fw_signature(struct amdgpu_device *adev) +{ + const struct common_firmware_header *hdr; + struct vce_v1_0_fw_signature *sign; + unsigned int ucode_offset; + uint32_t chip_id; + u32 *cpu_addr; + int i; + + hdr = (const struct common_firmware_header *)adev->vce.fw->data; + ucode_offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + cpu_addr = adev->vce.cpu_addr; + + sign = (void *)adev->vce.fw->data + ucode_offset; + + switch (adev->asic_type) { + case CHIP_TAHITI: + chip_id = 0x01000014; + break; + case CHIP_VERDE: + chip_id = 0x01000015; + break; + case CHIP_PITCAIRN: + chip_id = 0x01000016; + break; + default: + dev_err(adev->dev, "asic_type %#010x was not found!", adev->asic_type); + return -EINVAL; + } + + for (i = 0; i < le32_to_cpu(sign->number); ++i) { + if (le32_to_cpu(sign->val[i].chip_id) == chip_id) + break; + } + + if (i == le32_to_cpu(sign->number)) { + dev_err(adev->dev, "chip_id 0x%x for %s was not found in VCE firmware", + chip_id, amdgpu_asic_name[adev->asic_type]); + return -EINVAL; + } + + cpu_addr += (256 - 64) / 4; + memcpy_toio(&cpu_addr[0], &sign->val[i].nonce[0], 16); + cpu_addr[4] = cpu_to_le32(le32_to_cpu(sign->length) + 64); + + memset_io(&cpu_addr[5], 0, 44); + memcpy_toio(&cpu_addr[16], &sign[1], hdr->ucode_size_bytes - sizeof(*sign)); + + cpu_addr += (le32_to_cpu(sign->length) + 64) / 4; + memcpy_toio(&cpu_addr[0], &sign->val[i].sigval[0], 16); + + adev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect); + + return 0; +} + +static int vce_v1_0_wait_for_fw_validation(struct amdgpu_device *adev) +{ + int i; + + dev_dbg(adev->dev, "VCE keyselect: %d", adev->vce.keyselect); + WREG32(mmVCE_LMI_FW_START_KEYSEL, adev->vce.keyselect); + + for (i = 0; i < 10; ++i) { + mdelay(10); + if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK) + break; + } + + if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)) { + dev_err(adev->dev, "VCE FW validation timeout\n"); + return -ETIMEDOUT; + } + + if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__PASS_MASK)) { + dev_err(adev->dev, "VCE FW validation failed\n"); + return -EINVAL; + } + + for (i = 0; i < 10; ++i) { + mdelay(10); + if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK)) + break; + } + + if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK) { + dev_err(adev->dev, "VCE FW busy timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int vce_v1_0_mc_resume(struct amdgpu_device *adev) +{ + uint32_t offset; + uint32_t size; + + /* + * When the keyselect is already set, don't perturb VCE FW. + * Validation seems to always fail the second time. 
+ */ + if (RREG32(mmVCE_LMI_FW_START_KEYSEL)) { + dev_dbg(adev->dev, "keyselect already set: 0x%x (on CPU: 0x%x)\n", + RREG32(mmVCE_LMI_FW_START_KEYSEL), adev->vce.keyselect); + + WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); + return 0; + } + + WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); + WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); + WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); + WREG32(mmVCE_CLOCK_GATING_B, 0); + + WREG32_P(mmVCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4); + + WREG32(mmVCE_LMI_CTRL, 0x00398000); + + WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); + WREG32(mmVCE_LMI_SWAP_CNTL, 0); + WREG32(mmVCE_LMI_SWAP_CNTL1, 0); + WREG32(mmVCE_LMI_VM_CTRL, 0); + + WREG32(mmVCE_VCPU_SCRATCH7, AMDGPU_MAX_VCE_HANDLES); + + offset = adev->vce.gpu_addr + AMDGPU_VCE_FIRMWARE_OFFSET; + size = VCE_V1_0_FW_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE0, size); + + offset += size; + size = VCE_V1_0_STACK_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE1, size); + + offset += size; + size = VCE_V1_0_DATA_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE2, size); + + WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); + + return vce_v1_0_wait_for_fw_validation(adev); +} + +/** + * vce_v1_0_is_idle() - Check idle status of VCE1 IP block + * + * @ip_block: amdgpu_ip_block pointer + * + * Check whether VCE is busy according to VCE_STATUS. + * Also check whether the SRBM thinks VCE is busy, although + * SRBM_STATUS.VCE_BUSY seems to be bogus because it + * appears to mirror the VCE_STATUS.VCPU_REPORT_FW_LOADED bit. + */ +static bool vce_v1_0_is_idle(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + bool busy = + (RREG32(mmVCE_STATUS) & (VCE_STATUS__JOB_BUSY_MASK | VCE_STATUS__UENC_BUSY_MASK)) || + (RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); + + return !busy; +} + +static int vce_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + unsigned int i; + + for (i = 0; i < adev->usec_timeout; i++) { + udelay(1); + if (vce_v1_0_is_idle(ip_block)) + return 0; + } + return -ETIMEDOUT; +} + +/** + * vce_v1_0_start - start VCE block + * + * @adev: amdgpu_device pointer + * + * Setup and start the VCE block + */ +static int vce_v1_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + WREG32_P(mmVCE_STATUS, 1, ~1); + + r = vce_v1_0_mc_resume(adev); + if (r) + return r; + + ring = &adev->vce.ring[0]; + WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32(mmVCE_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vce.ring[1]; + WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32(mmVCE_RB_BASE_LO2, lower_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); + + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, + ~VCE_VCPU_CNTL__CLK_EN_MASK); + + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK, + ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK)); + + mdelay(100); + + WREG32_P(mmVCE_SOFT_RESET, 0, + ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + 
VCE_SOFT_RESET__FME_SOFT_RESET_MASK)); + + r = vce_v1_0_firmware_loaded(adev); + + /* Clear VCE_STATUS, otherwise SRBM thinks VCE1 is busy. */ + WREG32(mmVCE_STATUS, 0); + + if (r) { + dev_err(adev->dev, "VCE not responding, giving up\n"); + return r; + } + + return 0; +} + +static int vce_v1_0_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ip_block *ip_block; + int status; + int i; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE); + if (!ip_block) + return -EINVAL; + + if (vce_v1_0_lmi_clean(adev)) + dev_warn(adev->dev, "VCE not idle\n"); + + if (vce_v1_0_wait_for_idle(ip_block)) + dev_warn(adev->dev, "VCE busy: VCE_STATUS=0x%x, SRBM_STATUS2=0x%x\n", + RREG32(mmVCE_STATUS), RREG32(mmSRBM_STATUS2)); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8)); + + for (i = 0; i < 100; ++i) { + status = RREG32(mmVCE_LMI_STATUS); + if (status & 0x240) + break; + mdelay(1); + } + + WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); + + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK, + ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK)); + + WREG32(mmVCE_STATUS, 0); + + return 0; +} + +static void vce_v1_0_enable_mgcg(struct amdgpu_device *adev, bool enable) +{ + u32 tmp; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0x1ff000; + tmp |= 0xff800000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3ff; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + } else { + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp &= ~VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp |= 0x1ff000; + tmp &= ~0xff800000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp |= 0x3ff; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + } +} + +static int vce_v1_0_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_early_init(adev); + if (r) + return r; + + adev->vce.num_rings = 2; + + vce_v1_0_set_ring_funcs(adev); + vce_v1_0_set_irq_funcs(adev); + + return 0; +} + +/** + * vce_v1_0_ensure_vcpu_bo_32bit_addr() - ensure the VCPU BO has a 32-bit address + * + * @adev: amdgpu_device pointer + * + * Due to various hardware limitations, the VCE1 requires + * the VCPU BO to be in the low 32 bit address range. + * Ensure that the VCPU BO has a 32-bit GPU address, + * or return an error code when that isn't possible. + * + * To accomodate that, we put GART to the LOW address range + * and reserve some GART pages where we map the VCPU BO, + * so that it gets a 32-bit address. + */ +static int vce_v1_0_ensure_vcpu_bo_32bit_addr(struct amdgpu_device *adev) +{ + u64 gpu_addr = amdgpu_bo_gpu_offset(adev->vce.vcpu_bo); + u64 bo_size = amdgpu_bo_size(adev->vce.vcpu_bo); + u64 max_vcpu_bo_addr = 0xffffffff - bo_size; + u64 num_pages = ALIGN(bo_size, AMDGPU_GPU_PAGE_SIZE) / AMDGPU_GPU_PAGE_SIZE; + u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo); + u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID; + + /* + * Check if the VCPU BO already has a 32-bit address. + * Eg. if MC is configured to put VRAM in the low address range. 
+ */ + if (gpu_addr <= max_vcpu_bo_addr) + return 0; + + /* Check if we can map the VCPU BO in GART to a 32-bit address. */ + if (adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START > max_vcpu_bo_addr) + return -EINVAL; + + amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START, + num_pages, flags, adev->gart.ptr); + adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START; + if (adev->vce.gpu_addr > max_vcpu_bo_addr) + return -EINVAL; + + return 0; +} + +static int vce_v1_0_sw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ring *ring; + int r, i; + + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq); + if (r) + return r; + + r = amdgpu_vce_sw_init(adev, VCE_V1_0_FW_SIZE + + VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE); + if (r) + return r; + + r = amdgpu_vce_resume(adev); + if (r) + return r; + r = vce_v1_0_load_fw_signature(adev); + if (r) + return r; + r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev); + if (r) + return r; + + for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + + ring = &adev->vce.ring[i]; + sprintf(ring->name, "vce%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, + hw_prio, NULL); + if (r) + return r; + } + + return r; +} + +static int vce_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_suspend(adev); + if (r) + return r; + + return amdgpu_vce_sw_fini(adev); +} + +/** + * vce_v1_0_hw_init - start and test VCE block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vce_v1_0_hw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_vce(adev, true); + else + amdgpu_asic_set_vce_clocks(adev, 10000, 10000); + + for (i = 0; i < adev->vce.num_rings; i++) { + r = amdgpu_ring_test_helper(&adev->vce.ring[i]); + if (r) + return r; + } + + dev_info(adev->dev, "VCE initialized successfully.\n"); + + return 0; +} + +static int vce_v1_0_hw_fini(struct amdgpu_ip_block *ip_block) +{ + int r; + + r = vce_v1_0_stop(ip_block->adev); + if (r) + return r; + + cancel_delayed_work_sync(&ip_block->adev->vce.idle_work); + return 0; +} + +static int vce_v1_0_suspend(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; + + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). 
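/*
 * Editorial sketch, not part of the patch: vce_v1_0_ensure_vcpu_bo_32bit_addr()
 * above lines up with the earlier gmc_v6_0 switch to
 * AMDGPU_GART_PLACEMENT_LOW -- the VCE1 VCPU firmware BO must be reachable
 * through a 32-bit address, so when the BO's VRAM address is above the 32-bit
 * range it is remapped through a reserved GART window instead. The fragment
 * below reuses the locals computed in that function and shows the mapping
 * call exactly as it appears in the new file.
 */
	u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo);
	u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID;

	/* map the BO's VRAM pages into a fixed, low GART range */
	amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START,
				   num_pages, flags, adev->gart.ptr);
	adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START;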
+ */ + cancel_delayed_work_sync(&adev->vce.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_vce(adev, false); + } else { + amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + } + + r = vce_v1_0_hw_fini(ip_block); + if (r) { + dev_err(adev->dev, "vce_v1_0_hw_fini() failed with error %i", r); + return r; + } + + return amdgpu_vce_suspend(adev); +} + +static int vce_v1_0_resume(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_resume(adev); + if (r) + return r; + r = vce_v1_0_load_fw_signature(adev); + if (r) + return r; + r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev); + if (r) + return r; + + return vce_v1_0_hw_init(ip_block); +} + +static int vce_v1_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + uint32_t val = 0; + + if (state == AMDGPU_IRQ_STATE_ENABLE) + val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; + + WREG32_P(mmVCE_SYS_INT_EN, val, + ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + return 0; +} + +static int vce_v1_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg(adev->dev, "IH: VCE\n"); + switch (entry->src_data[0]) { + case 0: + case 1: + amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]); + break; + default: + dev_err(adev->dev, "Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static int vce_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = ip_block->adev; + + vce_v1_0_init_cg(adev); + vce_v1_0_enable_mgcg(adev, state == AMD_CG_STATE_GATE); + + return 0; +} + +static int vce_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = ip_block->adev; + + /* + * This doesn't actually powergate the VCE block. + * That's done in the dpm code via the SMC. This + * just re-inits the block as necessary. The actual + * gating still happens in the dpm code. 
We should + * revisit this when there is a cleaner line between + * the smc and the hw blocks + */ + if (state == AMD_PG_STATE_GATE) + return vce_v1_0_stop(adev); + else + return vce_v1_0_start(adev); +} + +static const struct amd_ip_funcs vce_v1_0_ip_funcs = { + .name = "vce_v1_0", + .early_init = vce_v1_0_early_init, + .sw_init = vce_v1_0_sw_init, + .sw_fini = vce_v1_0_sw_fini, + .hw_init = vce_v1_0_hw_init, + .hw_fini = vce_v1_0_hw_fini, + .suspend = vce_v1_0_suspend, + .resume = vce_v1_0_resume, + .is_idle = vce_v1_0_is_idle, + .wait_for_idle = vce_v1_0_wait_for_idle, + .set_clockgating_state = vce_v1_0_set_clockgating_state, + .set_powergating_state = vce_v1_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs vce_v1_0_ring_funcs = { + .type = AMDGPU_RING_TYPE_VCE, + .align_mask = 0xf, + .nop = VCE_CMD_NO_OP, + .support_64bit_ptrs = false, + .no_user_fence = true, + .get_rptr = vce_v1_0_ring_get_rptr, + .get_wptr = vce_v1_0_ring_get_wptr, + .set_wptr = vce_v1_0_ring_set_wptr, + .parse_cs = amdgpu_vce_ring_parse_cs, + .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ + .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ + .emit_ib = amdgpu_vce_ring_emit_ib, + .emit_fence = amdgpu_vce_ring_emit_fence, + .test_ring = amdgpu_vce_ring_test_ring, + .test_ib = amdgpu_vce_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, +}; + +static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vce.num_rings; i++) { + adev->vce.ring[i].funcs = &vce_v1_0_ring_funcs; + adev->vce.ring[i].me = i; + } +}; + +static const struct amdgpu_irq_src_funcs vce_v1_0_irq_funcs = { + .set = vce_v1_0_set_interrupt_state, + .process = vce_v1_0_process_interrupt, +}; + +static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->vce.irq.num_types = 1; + adev->vce.irq.funcs = &vce_v1_0_irq_funcs; +}; + +const struct amdgpu_ip_block_version vce_v1_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &vce_v1_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h new file mode 100644 index 000000000000..206e7bec897f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * Copyright 2025 Valve Corporation + * Copyright 2025 Alexandre Demers + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VCE_V1_0_H__ +#define __VCE_V1_0_H__ + +extern const struct amdgpu_ip_block_version vce_v1_0_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index bee3e904a6bc..8ea8a6193492 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -407,6 +407,11 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable, static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_early_init(adev); + if (r) + return r; adev->vce.num_rings = 2; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 708123899c41..719e9643c43d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -399,6 +399,7 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + int r; adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); @@ -407,6 +408,10 @@ static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block) (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) return -ENOENT; + r = amdgpu_vce_early_init(adev); + if (r) + return r; + adev->vce.num_rings = 3; vce_v3_0_set_ring_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 335bda64ff5b..2d64002bed61 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -410,6 +410,11 @@ static int vce_v4_0_stop(struct amdgpu_device *adev) static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_early_init(adev); + if (r) + return r; if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */ adev->vce.num_rings = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index eacf4e93ba2f..cb7123ec1a5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -141,7 +141,7 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block) adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - if (amdgpu_dpm_reset_vcn_is_supported(adev)) + if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev)) adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index 714350cabf2f..8bd457dea4cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -122,7 +122,9 @@ static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block) switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(13, 0, 12): - if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev)) + if ((adev->psp.sos.fw_version >= 0x00450025) && + amdgpu_dpm_reset_vcn_is_supported(adev) && + !amdgpu_sriov_vf(adev)) adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 0f0719528bcc..22925df6a791 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2826,7 +2826,7 @@ retry: static int runtime_disable(struct kfd_process *p) { - int i = 0, ret; + int i = 0, ret = 0; bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED; p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED; @@ -2863,6 +2863,7 @@ static int runtime_disable(struct kfd_process *p) /* disable ttmp setup */ for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; + int last_err = 0; if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { pdd->spi_dbg_override = @@ -2872,14 +2873,17 @@ static int runtime_disable(struct kfd_process *p) pdd->dev->vm_info.last_vmid_kfd); if (!pdd->dev->kfd->shared_resources.enable_mes) - debug_refresh_runlist(pdd->dev->dqm); + last_err = debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd, + last_err = kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); + + if (last_err) + ret = last_err; } } - return 0; + return ret; } static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 6e7bc983fc0b..d7a2e7178ea9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1897,6 +1897,8 @@ fail_packet_manager_init: static int stop_cpsch(struct device_queue_manager *dqm) { + int ret = 0; + dqm_lock(dqm); if (!dqm->sched_running) { dqm_unlock(dqm); @@ -1904,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm) } if (!dqm->dev->kfd->shared_resources.enable_mes) - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); + ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, + 0, USE_DEFAULT_GRACE_PERIOD, false); else - remove_all_kfd_queues_mes(dqm); + ret = remove_all_kfd_queues_mes(dqm); dqm->sched_running = false; @@ -1920,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) dqm->detect_hang_info = NULL; dqm_unlock(dqm); - return 0; + return ret; } static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, @@ -2091,7 +2094,8 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm, while (*fence_addr != fence_value) { /* Fatal err detected, this response won't come */ - if (amdgpu_amdkfd_is_fed(dqm->dev->adev)) + if (amdgpu_amdkfd_is_fed(dqm->dev->adev) || + amdgpu_in_reset(dqm->dev->adev)) return -EIO; if (time_after(jiffies, end_jiffies)) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 82905f3e54dd..5a190dd6be4e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -748,16 +748,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, uint64_t *slots = page_slots(p->signal_page); uint32_t id; - /* - * If id is valid but slot is not signaled, GPU may signal the same event twice - * before driver have chance to process the first interrupt, then signal slot is - * auto-reset after set_event wakeup the user space, just drop the second event as - * the application only need wakeup once. 
- */ - if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) && - partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT) - goto out_unlock; - if (valid_id_bits) pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n", partial_id, valid_id_bits); @@ -786,7 +776,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, } } -out_unlock: rcu_read_unlock(); kfd_unref_process(p); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 4ceb251312a6..d76fb61869c7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -28,6 +28,7 @@ #include "kfd_device_queue_manager.h" #include "kfd_smi_events.h" #include "amdgpu_ras.h" +#include "amdgpu_ras_mgr.h" /* * GFX9 SQ Interrupts @@ -228,7 +229,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, kfd_signal_poison_consumed_event(dev, pasid); - event_id = amdgpu_ras_acquire_event_id(dev->adev, type); + if (amdgpu_uniras_enabled(dev->adev)) + event_id = amdgpu_ras_mgr_gen_ras_event_seqno(dev->adev, + RAS_SEQNO_TYPE_POISON_CONSUMPTION); + else + event_id = amdgpu_ras_acquire_event_id(dev->adev, type); RAS_EVENT_LOG(dev->adev, event_id, "poison is consumed by client %d, kick off gpu reset flow\n", client_id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 59a5a3fea65d..46c84fc60af1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -21,7 +21,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/types.h> -#include <linux/hmm.h> #include <linux/dma-direction.h> #include <linux/dma-mapping.h> #include <linux/migrate.h> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 2eebf67f9c2c..2b7fd442d29c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -31,7 +31,6 @@ #include <linux/list.h> #include <linux/mutex.h> #include <linux/sched/mm.h> -#include <linux/hmm.h> #include "kfd_priv.h" #include "kfd_svm.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ddfe30c13e9d..a085faac9fe1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1083,7 +1083,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) * for auto suspend */ if (pdd->runtime_inuse) { - pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev); pdd->runtime_inuse = false; } @@ -1162,9 +1161,6 @@ static void kfd_process_wq_release(struct work_struct *work) release_work); struct dma_fence *ef; - kfd_process_dequeue_from_all_devices(p); - pqm_uninit(&p->pqm); - /* * If GPU in reset, user queues may still running, wait for reset complete. */ @@ -1226,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p) cancel_delayed_work_sync(&p->eviction_work); cancel_delayed_work_sync(&p->restore_work); + /* + * Dequeue and destroy user queues, it is not safe for GPU to access + * system memory after mmu release notifier callback returns because + * exit_mmap free process memory afterwards. 
+ */ + kfd_process_dequeue_from_all_devices(p); + pqm_uninit(&p->pqm); + for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c index a65c67cf56ff..f1e7583650c4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -297,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope goto out_err_unreserve; } - if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) { - pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n", + if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) { + pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n", properties->ctx_save_restore_area_size, topo_dev->node_props.cwsr_size); err = -EINVAL; goto out_err_unreserve; } - total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size) - * NUM_XCC(pdd->dev->xcc_mask); + total_cwsr_size = (properties->ctx_save_restore_area_size + + topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask); total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE); err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address, @@ -352,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_prope topo_dev = kfd_topology_device_by_id(pdd->dev->id); if (!topo_dev) return -EINVAL; - total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size) - * NUM_XCC(pdd->dev->xcc_mask); + total_cwsr_size = (properties->ctx_save_restore_area_size + + topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask); total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE); kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9d72411c3379..97c2270f278f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1698,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, start = map_start << PAGE_SHIFT; end = (map_last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { - struct hmm_range *hmm_range = NULL; + struct amdgpu_hmm_range *range = NULL; unsigned long map_start_vma; unsigned long map_last_vma; struct vm_area_struct *vma; @@ -1737,9 +1737,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } WRITE_ONCE(p->svms.faulting_task, current); - r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, - readonly, owner, - &hmm_range); + range = amdgpu_hmm_range_alloc(NULL); + if (likely(range)) + r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, + readonly, owner, range); + else + r = -ENOMEM; WRITE_ONCE(p->svms.faulting_task, NULL); if (r) pr_debug("failed %d to get svm range pages\n", r); @@ -1750,7 +1753,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, if (!r) { offset = (addr >> PAGE_SHIFT) - prange->start; r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, - hmm_range->hmm_pfns); + range->hmm_range.hmm_pfns); if (r) pr_debug("failed %d to dma map range\n", r); } @@ -1758,14 +1761,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm, svm_range_lock(prange); /* Free backing memory of hmm_range if it was initialized - * Overrride return value to TRY AGAIN only if prior returns + * Override return value to 
TRY AGAIN only if prior returns * were successful */ - if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) { + if (range && !amdgpu_hmm_range_valid(range) && !r) { pr_debug("hmm update the range, need validate again\n"); r = -EAGAIN; } + /* Free the hmm range */ + amdgpu_hmm_range_free(range); + if (!r && !list_empty(&prange->child_list)) { pr_debug("range split by unmap in parallel, validate again\n"); r = -EAGAIN; @@ -3687,6 +3693,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm, svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping); /* TODO: unmap ranges from GPU that lost access */ } + update_mapping |= !p->xnack_enabled && !list_empty(&remap_list); + list_for_each_entry_safe(prange, next, &remove_list, update_list) { pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, prange->start, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 01c7a4877904..a63dfc95b602 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -31,7 +31,6 @@ #include <linux/list.h> #include <linux/mutex.h> #include <linux/sched/mm.h> -#include <linux/hmm.h> #include "amdgpu.h" #include "kfd_priv.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5c98746eb72d..811636af14ea 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -530,7 +530,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", - dev->gpu->xcp ? + dev->gpu->xcp && + (dev->gpu->xcp->xcp_mgr->mode != + AMDGPU_SPX_PARTITION_MODE) ? dev->gpu->xcp->unique_id : dev->gpu->adev->unique_id); sysfs_show_32bit_prop(buffer, offs, "num_xcc", diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 7329b8cc2576..8e949fe77312 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -39,7 +39,8 @@ AMDGPUDM = \ amdgpu_dm_psr.o \ amdgpu_dm_replay.o \ amdgpu_dm_quirks.o \ - amdgpu_dm_wb.o + amdgpu_dm_wb.o \ + amdgpu_dm_colorop.o ifdef CONFIG_DRM_AMD_DC_FP AMDGPUDM += dc_fpu.o diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index bfa3199591b6..740711ac1037 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3392,6 +3392,67 @@ static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev, } } +/** + * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks + * @adev: amdgpu device pointer + * + * Iterates through all DC links and dumps information about local and remote + * (MST) sinks. Should be called after connector detection is complete to see + * the final state of all links. 
+ */ +static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct drm_device *dev = adev_to_drm(adev); + int li; + + if (!dc) + return; + + for (li = 0; li < dc->link_count; li++) { + struct dc_link *l = dc->links[li]; + const char *name = NULL; + int rs; + + if (!l) + continue; + if (l->local_sink && l->local_sink->edid_caps.display_name[0]) + name = l->local_sink->edid_caps.display_name; + else + name = "n/a"; + + drm_dbg_kms(dev, + "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n", + li, + l->local_sink, + l->type, + l->local_sink ? l->local_sink->sink_signal : SIGNAL_TYPE_NONE, + l->sink_count, + name, + l->dpcd_caps.is_mst_capable, + l->mst_stream_alloc_table.stream_count); + + /* Dump remote (MST) sinks if any */ + for (rs = 0; rs < l->sink_count; rs++) { + struct dc_sink *rsink = l->remote_sinks[rs]; + const char *rname = NULL; + + if (!rsink) + continue; + if (rsink->edid_caps.display_name[0]) + rname = rsink->edid_caps.display_name; + else + rname = "n/a"; + drm_dbg_kms(dev, + " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n", + li, rs, + rsink, + rsink->sink_signal, + rname); + } + } +} + static int dm_resume(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -3563,6 +3624,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + bool init = false; if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; @@ -3572,10 +3634,23 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) aconnector->mst_root) continue; - drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); + scoped_guard(mutex, &aconnector->mst_mgr.lock) { + init = !aconnector->mst_mgr.mst_primary; + } + if (init) + dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx, + aconnector->dc_link, false); + else + drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); } drm_connector_list_iter_end(&iter); + /* Debug dump: list all DC links and their associated sinks after detection + * is complete for all connectors. This provides a comprehensive view of the + * final state without repeating the dump for each connector. + */ + amdgpu_dm_dump_links_and_sinks(adev); + amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); @@ -3786,7 +3861,9 @@ void amdgpu_dm_update_connector_after_detect( drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", aconnector->connector_id, aconnector->dc_sink, sink); - guard(mutex)(&dev->mode_config.mutex); + /* When polling, DRM has already locked the mutex for us. */ + if (!drm_kms_helper_is_poll_worker()) + mutex_lock(&dev->mode_config.mutex); /* * 1. Update status of the drm connector @@ -3849,6 +3926,101 @@ void amdgpu_dm_update_connector_after_detect( } update_subconnector_property(aconnector); + + /* When polling, the mutex will be unlocked for us by DRM. 
*/ + if (!drm_kms_helper_is_poll_worker()) + mutex_unlock(&dev->mode_config.mutex); +} + +static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2) +{ + if (!sink1 || !sink2) + return false; + if (sink1->sink_signal != sink2->sink_signal) + return false; + + if (sink1->dc_edid.length != sink2->dc_edid.length) + return false; + + if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid, + sink1->dc_edid.length) != 0) + return false; + return true; +} + + +/** + * DOC: hdmi_hpd_debounce_work + * + * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD + * (such as during power save transitions), this delay determines how long to + * wait before processing the HPD event. This allows distinguishing between a + * physical unplug (>hdmi_hpd_debounce_delay) + * and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay). + * + * If the toggle is less than this delay, the driver compares sink capabilities + * and permits a hotplug event if they changed. + * + * The default value of 1500ms was chosen based on experimental testing with + * various monitors that exhibit spontaneous HPD toggling behavior. + */ +static void hdmi_hpd_debounce_work(struct work_struct *work) +{ + struct amdgpu_dm_connector *aconnector = + container_of(to_delayed_work(work), struct amdgpu_dm_connector, + hdmi_hpd_debounce_work); + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = aconnector->dc_link->ctx->dc; + bool fake_reconnect = false; + bool reallow_idle = false; + bool ret = false; + guard(mutex)(&aconnector->hpd_lock); + + /* Re-detect the display */ + scoped_guard(mutex, &adev->dm.dc_lock) { + if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) { + dc_allow_idle_optimizations(dc, false); + reallow_idle = true; + } + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + } + + if (ret) { + /* Apply workaround delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); + /* Compare sinks to determine if this was a spontaneous HPD toggle */ + if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) { + /* + * Sinks match - this was a spontaneous HDMI HPD toggle. 
+ */ + drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n"); + fake_reconnect = true; + } + + /* Update connector state */ + amdgpu_dm_update_connector_after_detect(aconnector); + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + /* Only notify OS if sink actually changed */ + if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED) + drm_kms_helper_hotplug_event(dev); + } + + /* Release the cached sink reference */ + if (aconnector->hdmi_prev_sink) { + dc_sink_release(aconnector->hdmi_prev_sink); + aconnector->hdmi_prev_sink = NULL; + } + + scoped_guard(mutex, &adev->dm.dc_lock) { + if (reallow_idle && dc->caps.ips_support) + dc_allow_idle_optimizations(dc, true); + } } static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) @@ -3860,6 +4032,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); struct dc *dc = aconnector->dc_link->ctx->dc; bool ret = false; + bool debounce_required = false; if (adev->dm.disable_hpd_irq) return; @@ -3882,6 +4055,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); + /* + * Check for HDMI disconnect with debounce enabled. + */ + debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 && + dc_is_hdmi_signal(aconnector->dc_link->connector_signal) && + new_connection_type == dc_connection_none && + aconnector->dc_link->local_sink != NULL); + if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); @@ -3891,7 +4072,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) drm_kms_helper_connector_hotplug_event(connector); + } else if (debounce_required) { + /* + * HDMI disconnect detected - schedule delayed work instead of + * processing immediately. This allows us to coalesce spurious + * HDMI signals from physical unplugs. + */ + drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n", + aconnector->hdmi_hpd_debounce_delay_ms); + + /* Cache the current sink for later comparison */ + if (aconnector->hdmi_prev_sink) + dc_sink_release(aconnector->hdmi_prev_sink); + aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink; + if (aconnector->hdmi_prev_sink) + dc_sink_retain(aconnector->hdmi_prev_sink); + + /* Schedule delayed detection. 
*/ + if (mod_delayed_work(system_wq, + &aconnector->hdmi_hpd_debounce_work, + msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms))) + drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n"); + } else { + + /* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */ + if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work)) + return; + scoped_guard(mutex, &adev->dm.dc_lock) { dc_exit_ips_for_hw_access(dc); ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); @@ -4917,6 +5125,21 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, struct dc_link *link; u32 brightness; bool rc, reallow_idle = false; + struct drm_connector *connector; + + list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) { + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (aconnector->bl_idx != bl_idx) + continue; + + /* if connector is off, save the brightness for next time it's on */ + if (!aconnector->base.encoder) { + dm->brightness[bl_idx] = user_brightness; + dm->actual_brightness[bl_idx] = 0; + return; + } + } amdgpu_dm_update_backlight_caps(dm, bl_idx); caps = &dm->backlight_caps[bl_idx]; @@ -5133,6 +5356,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, static void setup_backlight_device(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector) { + struct amdgpu_dm_backlight_caps *caps; struct dc_link *link = aconnector->dc_link; int bl_idx = dm->num_of_edps; @@ -5152,6 +5376,13 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm, dm->num_of_edps++; update_connector_ext_caps(aconnector); + caps = &dm->backlight_caps[aconnector->bl_idx]; + + /* Only offer ABM property when non-OLED and user didn't turn off by module parameter */ + if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0) + drm_object_attach_property(&aconnector->base.base, + dm->adev->mode_info.abm_level_property, + ABM_SYSFS_CONTROL); } static void amdgpu_set_panel_orientation(struct drm_connector *connector); @@ -5407,6 +5638,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) amdgpu_set_panel_orientation(&aconnector->base); } + /* Debug dump: list all DC links and their associated sinks after detection + * is complete for all connectors. This provides a comprehensive view of the + * final state without repeating the dump for each connector. + */ + amdgpu_dm_dump_links_and_sinks(adev); + /* Software is initialized. Now we can register interrupt handlers. */ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -5793,6 +6030,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, *color_space = COLOR_SPACE_SRGB; + /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */ + if (plane_state->state && plane_state->state->plane_color_pipeline) + return 0; + /* DRM color properties only affect non-RGB formats. */ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return 0; @@ -7145,29 +7386,117 @@ finish: return stream; } +/** + * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display + * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link) + * @force: if true, force polling even when DAC load detection was used + * + * Used for connectors that don't support HPD (hotplug detection) to + * periodically check whether the connector is connected to a display. 
+ * + * When connection was determined via DAC load detection, we avoid + * re-running it on normal polls to prevent visible glitches, unless + * @force is set. + * + * Return: The probed connector status (connected/disconnected/unknown). + */ static enum drm_connector_status -amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) +amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force) { - bool connected; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc_link *link = aconnector->dc_link; + enum dc_connection_type conn_type = dc_connection_none; + enum drm_connector_status status = connector_status_disconnected; - /* - * Notes: - * 1. This interface is NOT called in context of HPD irq. - * 2. This interface *is called* in context of user-mode ioctl. Which - * makes it a bad place for *any* MST-related activity. + /* When we determined the connection using DAC load detection, + * do NOT poll the connector to detect disconnect because + * that would run DAC load detection again which can cause + * visible glitches. + * + * Only allow polling such a connector again when forcing. */ + if (!force && link->local_sink && link->type == dc_connection_dac_load) + return connector->status; - if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && - !aconnector->fake_enable) - connected = (aconnector->dc_sink != NULL); - else - connected = (aconnector->base.force == DRM_FORCE_ON || - aconnector->base.force == DRM_FORCE_ON_DIGITAL); + mutex_lock(&aconnector->hpd_lock); + + if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) && + conn_type != dc_connection_none) { + mutex_lock(&adev->dm.dc_lock); + + /* Only call full link detection when a sink isn't created yet, + * i.e. just when the display is plugged in, otherwise we risk flickering. + */ + if (link->local_sink || + dc_link_detect(link, DETECT_REASON_HPD)) + status = connector_status_connected; + + mutex_unlock(&adev->dm.dc_lock); + } + + if (connector->status != status) { + if (status == connector_status_disconnected) { + if (link->local_sink) + dc_sink_release(link->local_sink); + + link->local_sink = NULL; + link->dpcd_sink_count = 0; + link->type = dc_connection_none; + } + + amdgpu_dm_update_connector_after_detect(aconnector); + } + + mutex_unlock(&aconnector->hpd_lock); + return status; +} + +/** + * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display + * + * A connector is considered connected when it has a sink that is not NULL. + * For connectors that support HPD (hotplug detection), the connection is + * handled in the HPD interrupt. + * For connectors that may not support HPD, such as analog connectors, + * DRM will call this function repeatedly to poll them. + * + * Notes: + * 1. This interface is NOT called in context of HPD irq. + * 2. This interface *is called* in context of user-mode ioctl. Which + * makes it a bad place for *any* MST-related activity. + * + * @connector: The DRM connector we are checking. We convert it to + * amdgpu_dm_connector so we can read the DC link and state. + * @force: If true, do a full detect again. This is used even when + * a lighter check would normally be used to avoid flicker. + * + * Return: The connector status (connected, disconnected, or unknown).
+ * + */ +static enum drm_connector_status +amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); update_subconnector_property(aconnector); - return (connected ? connector_status_connected : + if (aconnector->base.force == DRM_FORCE_ON || + aconnector->base.force == DRM_FORCE_ON_DIGITAL) + return connector_status_connected; + else if (aconnector->base.force == DRM_FORCE_OFF) + return connector_status_disconnected; + + /* Poll analog connectors and only when either + * disconnected or connected to an analog display. + */ + if (drm_kms_helper_is_poll_worker() && + dc_connector_supports_analog(aconnector->dc_link->link_id.id) && + (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog)) + return amdgpu_dm_connector_poll(aconnector, force); + + return (aconnector->dc_sink ? connector_status_connected : connector_status_disconnected); } @@ -7218,6 +7547,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + switch (val) { + case ABM_SYSFS_CONTROL: + dm_new_state->abm_sysfs_forbidden = false; + break; + case ABM_LEVEL_OFF: + dm_new_state->abm_sysfs_forbidden = true; + dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; + break; + default: + dm_new_state->abm_sysfs_forbidden = true; + dm_new_state->abm_level = val; + } + ret = 0; } return ret; @@ -7260,6 +7603,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + if (!dm_state->abm_sysfs_forbidden) + *val = ABM_SYSFS_CONTROL; + else + *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? 
+ dm_state->abm_level : 0; + ret = 0; } return ret; @@ -7312,10 +7662,16 @@ static ssize_t panel_power_savings_store(struct device *device, return -EINVAL; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - to_dm_connector_state(connector->state)->abm_level = val ?: - ABM_LEVEL_IMMEDIATE_DISABLE; + if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden) + ret = -EBUSY; + else + to_dm_connector_state(connector->state)->abm_level = val ?: + ABM_LEVEL_IMMEDIATE_DISABLE; drm_modeset_unlock(&dev->mode_config.connection_mutex); + if (ret) + return ret; + drm_kms_helper_hotplug_event(dev); return count; @@ -7380,6 +7736,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) if (aconnector->mst_mgr.dev) drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); + /* Cancel and flush any pending HDMI HPD debounce work */ + cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work); + if (aconnector->hdmi_prev_sink) { + dc_sink_release(aconnector->hdmi_prev_sink); + aconnector->hdmi_prev_sink = NULL; + } + if (aconnector->bl_idx != -1) { backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); dm->backlight_dev[aconnector->bl_idx] = NULL; @@ -8030,7 +8393,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, "mode %dx%d@%dHz is not native, enabling scaling\n", adjusted_mode->hdisplay, adjusted_mode->vdisplay, drm_mode_vrefresh(adjusted_mode)); - dm_new_connector_state->scaling = RMX_FULL; + dm_new_connector_state->scaling = RMX_ASPECT; } return 0; } @@ -8155,7 +8518,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, return 0; } -static int to_drm_connector_type(enum signal_type st) +static int to_drm_connector_type(enum signal_type st, uint32_t connector_id) { switch (st) { case SIGNAL_TYPE_HDMI_TYPE_A: @@ -8171,6 +8534,10 @@ static int to_drm_connector_type(enum signal_type st) return DRM_MODE_CONNECTOR_DisplayPort; case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_DVI_SINGLE_LINK: + if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII || + connector_id == CONNECTOR_ID_DUAL_LINK_DVII) + return DRM_MODE_CONNECTOR_DVII; + return DRM_MODE_CONNECTOR_DVID; case SIGNAL_TYPE_VIRTUAL: return DRM_MODE_CONNECTOR_VIRTUAL; @@ -8222,7 +8589,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector) static struct drm_display_mode * amdgpu_dm_create_common_mode(struct drm_encoder *encoder, - char *name, + const char *name, int hdisplay, int vdisplay) { struct drm_device *dev = encoder->dev; @@ -8244,6 +8611,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder, } +static const struct amdgpu_dm_mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; +} common_modes[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} +}; + static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector) { @@ -8254,23 +8639,6 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, to_amdgpu_dm_connector(connector); int i; int n; - struct mode_size { - char name[DRM_DISPLAY_MODE_LEN]; - int w; - int h; - } common_modes[] = { - { "640x480", 640, 480}, - { "800x600", 800, 600}, - { "1024x768", 1024, 768}, - { "1280x720", 1280, 720}, - { "1280x800", 1280, 800}, - 
{"1280x1024", 1280, 1024}, - { "1440x900", 1440, 900}, - {"1680x1050", 1680, 1050}, - {"1600x1200", 1600, 1200}, - {"1920x1080", 1920, 1080}, - {"1920x1200", 1920, 1200} - }; if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) && (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)) @@ -8471,6 +8839,16 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect if (!(amdgpu_freesync_vid_mode && drm_edid)) return; + if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link) + return; + + if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version)) + return; + + if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) && + amdgpu_dm_connector->dc_sink->edid_caps.analog) + return; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) amdgpu_dm_connector->num_modes += add_fs_modes(amdgpu_dm_connector); @@ -8480,11 +8858,11 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + struct dc_link *dc_link = amdgpu_dm_connector->dc_link; struct drm_encoder *encoder; const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; - struct dc_link_settings *verified_link_cap = - &amdgpu_dm_connector->dc_link->verified_link_cap; - const struct dc *dc = amdgpu_dm_connector->dc_link->dc; + struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap; + const struct dc *dc = dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); @@ -8494,6 +8872,17 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); + + if (amdgpu_dm_connector->dc_sink && + amdgpu_dm_connector->dc_sink->edid_caps.analog && + dc_connector_supports_analog(dc_link->link_id.id)) { + /* Analog monitor connected by DAC load detection. + * Add common modes. It will be up to the user to select one that works. 
+ */ + for (int i = 0; i < ARRAY_SIZE(common_modes); i++) + amdgpu_dm_connector->num_modes += drm_add_modes_noedid( + connector, common_modes[i].w, common_modes[i].h); + } } else { amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); if (encoder) @@ -8541,6 +8930,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, mutex_init(&aconnector->hpd_lock); mutex_init(&aconnector->handle_mst_msg_ready); + aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS; + INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work); + aconnector->hdmi_prev_sink = NULL; + /* * configure support HPD hot plug connector_>polled default value is 0 * which means HPD hot plug not supported @@ -8562,6 +8955,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, case DRM_MODE_CONNECTOR_DVID: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; break; + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_VGA: + aconnector->base.polled = + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + break; default: break; } @@ -8763,7 +9161,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, goto out_free; } - connector_type = to_drm_connector_type(link->connector_signal); + connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id); res = drm_connector_init_with_ddc( dm->ddev, @@ -10519,7 +10917,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * Here we create an empty update on each plane. * To fix this, DC should permit updating only stream properties. */ - dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); + dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL); if (!dummy_updates) { drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n"); continue; @@ -12446,7 +12844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, int j = state->num_private_objs-1; dm_atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); /* If i is not at the end of the array then the * last element needs to be moved to where i was @@ -12457,7 +12855,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, state->private_objs[j]; state->private_objs[j].ptr = NULL; - state->private_objs[j].state = NULL; + state->private_objs[j].state_to_destroy = NULL; state->private_objs[j].old_state = NULL; state->private_objs[j].new_state = NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index db75e991ac7b..ef97cede9926 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -59,6 +59,7 @@ #define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL) +#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500 /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -819,6 +820,11 @@ struct amdgpu_dm_connector { bool pack_sdp_v1_3; enum adaptive_sync_type as_type; struct amdgpu_hdmi_vsdb_info vsdb_info; + + /* HDMI HPD debounce support */ + unsigned int hdmi_hpd_debounce_delay_ms; + struct delayed_work hdmi_hpd_debounce_work; + struct dc_sink *hdmi_prev_sink; }; static inline void amdgpu_dm_set_mst_status(uint8_t *status, @@ -993,6 +999,7 @@ struct dm_connector_state { bool underscan_enable; bool freesync_capable; bool update_hdcp; + bool abm_sysfs_forbidden; uint8_t abm_level; int vcpi_slots; uint64_t pbn; diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a4ac6d442278..1dcc79b35225 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -26,12 +26,39 @@ #include "amdgpu.h" #include "amdgpu_mode.h" #include "amdgpu_dm.h" +#include "amdgpu_dm_colorop.h" #include "dc.h" #include "modules/color/color_gamma.h" /** * DOC: overview * + * We have three types of color management in the AMD display driver. + * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties + * 2. AMD driver private color management on &drm_plane and &drm_crtc + * 3. AMD plane color pipeline + * + * The CRTC properties are the original color management. When they were + * implemented per-plane color management was not a thing yet. Because + * of that we could get away with plumbing the DEGAMMA and CTM + * properties to pre-blending HW functions. This is incompatible with + * per-plane color management, such as via the AMD private properties or + * the new drm_plane color pipeline. The only compatible CRTC property + * with per-plane color management is the GAMMA property as it is + * applied post-blending. + * + * The AMD driver private color management properties are only exposed + * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They + * are temporary building blocks on the path to full-fledged &drm_plane + * and &drm_crtc color pipelines and lay the driver's groundwork for the + * color pipelines. + * + * The AMD plane color pipeline describes AMD's &drm_colorops via the + * &drm_plane's COLOR_PIPELINE property. + * + * drm_crtc Properties + * ------------------- + * * The DC interface to HW gives us the following color management blocks * per pipe (surface): * @@ -42,36 +69,93 @@ * - Surface regamma LUT (normalized) * - Output CSC (normalized) * - * But these aren't a direct mapping to DRM color properties. The current DRM - * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware - * is essentially giving: + * But these aren't a direct mapping to DRM color properties. The + * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma + * while our hardware is essentially giving: * * Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM * - * The input gamma LUT block isn't really applicable here since it operates - * on the actual input data itself rather than the HW fp representation. The - * input and output CSC blocks are technically available to use as part of - * the DC interface but are typically used internally by DC for conversions - * between color spaces. These could be blended together with user - * adjustments in the future but for now these should remain untouched. + * The input gamma LUT block isn't really applicable here since it + * operates on the actual input data itself rather than the HW fp + * representation. The input and output CSC blocks are technically + * available to use as part of the DC interface but are typically used + * internally by DC for conversions between color spaces. These could be + * blended together with user adjustments in the future but for now + * these should remain untouched. 
+ * + * The pipe blending also happens after these blocks so we don't + * actually support any CRTC props with correct blending with multiple + * planes - but we can still support CRTC color management properties in + * DM in most single plane cases correctly with clever management of the + * DC interface in DM. + * + * As per DRM documentation, blocks should be in hardware bypass when + * their respective property is set to NULL. A linear DGM/RGM LUT should + * also be considered as putting the respective block into bypass mode. + * + * This means that the following configuration is assumed to be the + * default: + * + * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC + * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass + * + * AMD Private Color Management on drm_plane + * ----------------------------------------- + * + * The AMD private color management properties on a &drm_plane are: * - * The pipe blending also happens after these blocks so we don't actually - * support any CRTC props with correct blending with multiple planes - but we - * can still support CRTC color management properties in DM in most single - * plane cases correctly with clever management of the DC interface in DM. + * - AMD_PLANE_DEGAMMA_LUT + * - AMD_PLANE_DEGAMMA_LUT_SIZE + * - AMD_PLANE_DEGAMMA_TF + * - AMD_PLANE_HDR_MULT + * - AMD_PLANE_CTM + * - AMD_PLANE_SHAPER_LUT + * - AMD_PLANE_SHAPER_LUT_SIZE + * - AMD_PLANE_SHAPER_TF + * - AMD_PLANE_LUT3D + * - AMD_PLANE_LUT3D_SIZE + * - AMD_PLANE_BLEND_LUT + * - AMD_PLANE_BLEND_LUT_SIZE + * - AMD_PLANE_BLEND_TF * - * As per DRM documentation, blocks should be in hardware bypass when their - * respective property is set to NULL. A linear DGM/RGM LUT should also - * considered as putting the respective block into bypass mode. + * The AMD private color management property on a &drm_crtc is: * - * This means that the following - * configuration is assumed to be the default: + * - AMD_CRTC_REGAMMA_TF + * + * Use of these properties is discouraged. + * + * AMD plane color pipeline + * ------------------------ + * + * The AMD &drm_plane color pipeline is advertised for DCN generations + * 3.0 and newer. It exposes these elements in this order: + * + * 1. 1D curve colorop + * 2. Multiplier + * 3. 3x4 CTM + * 4. 1D curve colorop + * 5. 1D LUT + * 6. 3D LUT + * 7. 1D curve colorop + * 8. 1D LUT + * + * The multiplier (#2) is a simple multiplier that is applied to all + * channels. + * + * The 3x4 CTM (#3) is a simple 3x4 matrix. + * + * #1 and #7 are non-linear to linear curves. #4 is a linear to + * non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or + * their inverse. + * + * The 1D LUTs (#5 and #8) are plain 4096-entry LUTs. + * + * The 3DLUT (#6) is a tetrahedrally interpolated 17 cube LUT. * - * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... - * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass */ #define MAX_DRM_LUT_VALUE 0xFFFF +#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF #define SDR_WHITE_LEVEL_INIT_VALUE 80 /** @@ -342,6 +426,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size) } /** + * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob. + * @blob: DRM color mgmt property blob + * @size: lut size + * + * Returns: + * DRM LUT or NULL + */ +static const struct drm_color_lut32 * +__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size) +{ + *size = blob ? drm_color_lut32_size(blob) : 0; + return blob ?
(struct drm_color_lut32 *)blob->data : NULL; +} + +/** * __is_lut_linear - check if the given lut is a linear mapping of values * @lut: given lut to check values * @size: lut size @@ -415,6 +514,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, } /** + * __drm_lut32_to_dc_gamma - convert the drm_color_lut to dc_gamma. + * @lut: DRM lookup table for color conversion + * @gamma: DC gamma to set entries + * + * The conversion depends on the size of the lut - whether or not it's legacy. + */ +static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma) +{ + int i; + + for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) { + gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE); + gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE); + gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE); + } +} + +/** * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix * @ctm: DRM color transformation matrix * @matrix: DC CSC float matrix @@ -566,6 +683,63 @@ static int __set_output_tf(struct dc_transfer_func *func, return res ? 0 : -ENOMEM; } +/** + * __set_output_tf_32 - calculates the output transfer function based on expected input space. + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut + * @has_rom: if ROM can be used for hardcoded curve + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ +static int __set_output_tf_32(struct dc_transfer_func *func, + const struct drm_color_lut32 *lut, uint32_t lut_size, + bool has_rom) +{ + struct dc_gamma *gamma = NULL; + struct calculate_buffer cal_buffer = {0}; + bool res; + + cal_buffer.buffer_index = -1; + + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->num_entries = lut_size; + __drm_lut32_to_dc_gamma(lut, gamma); + } + + if (func->tf == TRANSFER_FUNCTION_LINEAR) { + /* + * Color module doesn't like calculating regamma params + * on top of a linear input. But degamma params can be used + * instead to simulate this. + */ + if (gamma) + gamma->type = GAMMA_CUSTOM; + res = mod_color_calculate_degamma_params(NULL, func, + gamma, gamma != NULL); + } else { + /* + * Assume sRGB. The actual mapping will depend on whether the + * input was legacy or not. + */ + if (gamma) + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, + has_rom, NULL, &cal_buffer); + } + + if (gamma) + dc_gamma_release(&gamma); + + return res ? 0 : -ENOMEM; +} + + static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf, const struct drm_color_lut *regamma_lut, uint32_t regamma_size, bool has_rom, @@ -638,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f return res ? 0 : -ENOMEM; } +/** + * __set_input_tf_32 - calculates the input transfer function based on expected + * input space. + * @caps: dc color capabilities + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut. + * + * Returns: + * 0 in case of success. -ENOMEM if fails. 
+ */ +static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func, + const struct drm_color_lut32 *lut, uint32_t lut_size) +{ + struct dc_gamma *gamma = NULL; + bool res; + + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; + + __drm_lut32_to_dc_gamma(lut, gamma); + } + + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); + + if (gamma) + dc_gamma_release(&gamma); + + return res ? 0 : -ENOMEM; +} + static enum dc_transfer_func_predefined amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) { @@ -667,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) } } +static enum dc_transfer_func_predefined +amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf) +{ + switch (tf) { + case DRM_COLOROP_1D_CURVE_SRGB_EOTF: + case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: + return TRANSFER_FUNCTION_SRGB; + case DRM_COLOROP_1D_CURVE_PQ_125_EOTF: + case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF: + return TRANSFER_FUNCTION_PQ; + case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF: + case DRM_COLOROP_1D_CURVE_BT2020_OETF: + return TRANSFER_FUNCTION_BT709; + case DRM_COLOROP_1D_CURVE_GAMMA22: + case DRM_COLOROP_1D_CURVE_GAMMA22_INV: + return TRANSFER_FUNCTION_GAMMA22; + default: + return TRANSFER_FUNCTION_LINEAR; + } +} + static void __to_dc_lut3d_color(struct dc_rgb *rgb, const struct drm_color_lut lut, int bit_precision) @@ -720,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut, __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); } +static void __to_dc_lut3d_32_color(struct dc_rgb *rgb, + const struct drm_color_lut32 lut, + int bit_precision) +{ + rgb->red = drm_color_lut32_extract(lut.red, bit_precision); + rgb->green = drm_color_lut32_extract(lut.green, bit_precision); + rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision); +} + +static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut, + uint32_t lut3d_size, + struct tetrahedral_params *params, + bool use_tetrahedral_9, + int bit_depth) +{ + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_i, i; + + + if (use_tetrahedral_9) { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + } else { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + } + + for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) { + /* + * We should consider the 3D LUT RGB values are distributed + * along four arrays lut0-3 where the first sizes 1229 and the + * other 1228. The bit depth supported for 3dlut channel is + * 12-bit, but DC also supports 10-bit. + * + * TODO: improve color pipeline API to enable the userspace set + * bit depth and 3D LUT size/stride, as specified by VA-API. 
+ */ + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); + __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth); + __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth); + __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth); + } + /* lut0 has 1229 points (lut_size/4 + 1) */ + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); +} + /* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream * @drm_lut3d: user 3D LUT * @drm_lut3d_size: size of 3D LUT @@ -1178,6 +1462,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state, } static int +__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state, + struct drm_colorop_state *colorop_state) +{ + struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func; + struct drm_colorop *colorop = colorop_state->colorop; + struct drm_device *drm = colorop->dev; + + if (colorop->type != DRM_COLOROP_1D_CURVE) + return -EINVAL; + + if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) + return -EINVAL; + + if (colorop_state->bypass) { + tf->type = TF_TYPE_BYPASS; + tf->tf = TRANSFER_FUNCTION_LINEAR; + return 0; + } + + drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id); + + tf->type = TF_TYPE_PREDEFINED; + tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + + return 0; +} + +static int +__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + int i = 0; + + old_colorop = colorop; + + /* 1st op: 1d curve - degamma */ + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (!colorop_state) + return -EINVAL; + + return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state); +} + +static int +__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + const struct drm_device *dev = colorop->dev; + const struct drm_property_blob *blob; + struct drm_color_ctm_3x4 *ctm = NULL; + int i = 0; + + /* 3x4 matrix */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) { + drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id); + blob = colorop_state->data; + if (blob->length == sizeof(struct drm_color_ctm_3x4)) { + ctm = (struct drm_color_ctm_3x4 *) blob->data; + __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix); + dc_plane_state->gamut_remap_matrix.enable_remap = true; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } else { + drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n", + blob->length, sizeof(struct drm_color_ctm_3x4)); + return -EINVAL; + } + } + + return 0; +} + +static int 
+__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + const struct drm_device *dev = colorop->dev; + int i = 0; + + /* Multiplier */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) { + drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id); + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier); + } + + return 0; +} + +static int +__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; + const struct drm_color_lut32 *shaper_lut; + struct drm_device *dev = colorop->dev; + bool enabled = false; + u32 shaper_size; + int i = 0, ret = 0; + + /* 1D Curve - SHAPER TF */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) { + drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + ret = __set_output_tf(tf, 0, 0, false); + if (ret) + return ret; + enabled = true; + } + + /* 1D LUT - SHAPER LUT */ + colorop = old_colorop->next; + if (!colorop) { + drm_dbg(dev, "no Shaper LUT colorop found\n"); + return -EINVAL; + } + + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) { + drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size); + shaper_size = shaper_lut != NULL ? 
shaper_size : 0; + + /* Custom LUT size must be the same as supported size */ + if (shaper_size == colorop->size) { + ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false); + if (ret) + return ret; + enabled = true; + } + } + + if (!enabled) + tf->type = TF_TYPE_BYPASS; + + return 0; +} + +/* __set_colorop_3dlut - set DRM 3D LUT to DC stream + * @drm_lut3d: user 3D LUT + * @drm_lut3d_size: size of 3D LUT + * @lut3d: DC 3D LUT + * + * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it + * on DCN accordingly. + * + * Returns: + * 0 on success. -EINVAL if drm_lut3d_size is zero. + */ +static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d, + uint32_t drm_lut3d_size, + struct dc_3dlut *lut) +{ + if (!drm_lut3d_size) { + lut->state.bits.initialized = 0; + return -EINVAL; + } + + /* Only supports 17x17x17 3D LUT (12-bit) now */ + lut->lut_3d.use_12bits = true; + lut->lut_3d.use_tetrahedral_9 = false; + + lut->state.bits.initialized = 1; + __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d, + lut->lut_3d.use_tetrahedral_9, 12); + + return 0; +} + +static int +__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; + struct drm_atomic_state *state = plane_state->state; + const struct amdgpu_device *adev = drm_to_adev(colorop->dev); + const struct drm_device *dev = colorop->dev; + const struct drm_color_lut32 *lut3d; + uint32_t lut3d_size; + int i = 0, ret = 0; + + /* 3D LUT */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) { + if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) { + drm_dbg(dev, "3D LUT is not supported by hardware\n"); + return -EINVAL; + } + + drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id); + lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size); + lut3d_size = lut3d != NULL ? lut3d_size : 0; + ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func); + if (ret) { + drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n", + colorop->base.id, lut3d_size); + return ret; + } + + /* 3D LUT requires shaper. 
If shaper colorop is bypassed, enable shaper curve + * with TRANSFER_FUNCTION_LINEAR + */ + if (tf->type == TF_TYPE_BYPASS) { + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = TRANSFER_FUNCTION_LINEAR; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + ret = __set_output_tf_32(tf, NULL, 0, false); + } + } + + return ret; +} + +static int +__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; + struct dc_transfer_func *tf = &dc_plane_state->blend_tf; + const struct drm_color_lut32 *blend_lut = NULL; + struct drm_device *dev = colorop->dev; + uint32_t blend_size = 0; + int i = 0; + + /* 1D Curve - BLND TF */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE && + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + __set_input_tf_32(NULL, tf, blend_lut, blend_size); + } + + /* 1D Curve - BLND LUT */ + colorop = old_colorop->next; + if (!colorop) { + drm_dbg(dev, "no Blend LUT colorop found\n"); + return -EINVAL; + } + + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT && + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size); + blend_size = blend_lut != NULL ? 
blend_size : 0; + + /* Custom LUT size must be the same as supported size */ + if (blend_size == colorop->size) + __set_input_tf_32(NULL, tf, blend_lut, blend_size); + } + + return 0; +} + +static int amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, struct dc_plane_state *dc_plane_state) { @@ -1227,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, return 0; } +static int +amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct drm_colorop *colorop = plane_state->color_pipeline; + struct drm_device *dev = plane_state->plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + int ret; + + /* 1D Curve - DEGAM TF */ + if (!colorop) + return -EINVAL; + + ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* Multiplier */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no multiplier colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* 3x4 matrix */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no 3x4 matrix colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { + /* 1D Curve & LUT - SHAPER TF & LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no Shaper TF colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* Shaper LUT colorop is already handled, just skip here */ + colorop = colorop->next; + if (!colorop) + return -EINVAL; + + /* 3D LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no 3D LUT colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + } + + /* 1D Curve & LUT - BLND TF & LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no Blend TF colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* BLND LUT colorop is already handled, just skip here */ + colorop = colorop->next; + if (!colorop) + return -EINVAL; + + return 0; +} + /** * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. * @crtc: amdgpu_dm crtc state @@ -1323,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, dc_plane_state->input_csc_color_matrix.enable_adjustment = false; } + if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state)) + return 0; + return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c new file mode 100644 index 000000000000..d585618b8064 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drm_print.h> +#include <drm/drm_plane.h> +#include <drm/drm_property.h> +#include <drm/drm_colorop.h> + +#include "amdgpu.h" +#include "amdgpu_dm_colorop.h" +#include "dc.h" + +const u64 amdgpu_dm_supported_degam_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); + +const u64 amdgpu_dm_supported_shaper_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22); + +const u64 amdgpu_dm_supported_blnd_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); + +#define MAX_COLOR_PIPELINE_OPS 10 + +#define LUT3D_SIZE 17 + +int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) +{ + struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; + struct drm_device *dev = plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + int ret; + int i = 0; + + memset(ops, 0, sizeof(ops)); + + /* 1D curve - DEGAM TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_degam_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + list->type = ops[i]->base.id; + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); + + i++; + + /* Multiplier */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 3x4 matrix */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { + /* 1D curve - SHAPER TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = 
drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_shaper_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 1D LUT - SHAPER LUT */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 3D LUT */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE, + DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + } + + /* 1D curve - BLND TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_blnd_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + i++; + + /* 1D LUT - BLND LUT */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + return 0; + +cleanup: + if (ret == -ENOMEM) + drm_err(plane->dev, "KMS: Failed to allocate colorop\n"); + + drm_colorop_pipeline_destroy(dev); + + return ret; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h new file mode 100644 index 000000000000..2e1617ffc8ee --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_COLOROP_H__ +#define __AMDGPU_DM_COLOROP_H__ + +extern const u64 amdgpu_dm_supported_degam_tfs; +extern const u64 amdgpu_dm_supported_shaper_tfs; +extern const u64 amdgpu_dm_supported_blnd_tfs; + +int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list); + +#endif /* __AMDGPU_DM_COLOROP_H__*/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 38f9ea313dcb..697e232acebf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -736,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, { struct amdgpu_crtc *acrtc = NULL; struct drm_plane *cursor_plane; - bool is_dcn; + bool has_degamma; int res = -ENOMEM; cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); @@ -775,20 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, dm->adev->mode_info.crtcs[crtc_index] = acrtc; - /* Don't enable DRM CRTC degamma property for DCE since it doesn't - * support programmable degamma anywhere. + /* Don't enable DRM CRTC degamma property for + * 1. Degamma is replaced by color pipeline. + * 2. DCE since it doesn't support programmable degamma anywhere. + * 3. DCN401 since pre-blending degamma LUT doesn't apply to cursor. */ - is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; - /* Dont't enable DRM CRTC degamma property for DCN401 since the - * pre-blending degamma LUT doesn't apply to cursor, and therefore - * can't work similar to a post-blending degamma LUT as in other hw - * versions. - * TODO: revisit it once KMS plane color API is merged. - */ - drm_crtc_enable_color_mgmt(&acrtc->base, - (is_dcn && - dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? - MAX_COLOR_LUT_ENTRIES : 0, + if (plane->color_pipeline_property) + has_degamma = false; + else + has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01; + + drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f263e1a4537e..a9839485f2a2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -759,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us int max_param_num = 11; enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; bool disable_hpd = false; + bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID; bool valid_test_pattern = false; uint8_t param_nums = 0; /* init with default 80bit custom pattern */ @@ -850,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us * because it might have been disabled after a test pattern was set. * AUX depends on HPD * sequence dependent, do not move! */ - if (!disable_hpd) + if (supports_hpd && !disable_hpd) dc_link_enable_hpd(link); prefer_link_settings.lane_count = link->verified_link_cap.lane_count; @@ -888,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us * Need disable interrupt to avoid SW driver disable DP output. This is * done after the test pattern is set. 
*/ - if (valid_test_pattern && disable_hpd) + if (valid_test_pattern && supports_hpd && disable_hpd) dc_link_disable_hpd(link); kfree(wr_buf); @@ -1302,7 +1303,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused) if (connector->status != connector_status_connected) return -ENODEV; - if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments) + if (pipe_ctx && pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments) pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments); seq_printf(m, "%d\n", segments); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 19038f336155..85ce558cefc5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -201,6 +201,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, struct mod_hdcp_link_adjustment link_adjust; struct mod_hdcp_display_adjustment display_adjust; unsigned int conn_index = aconnector->base.index; + const struct dc *dc = aconnector->dc_link->dc; guard(mutex)(&hdcp_w->mutex); drm_connector_get(&aconnector->base); @@ -231,6 +232,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, link_adjust.hdcp1.disable = 1; link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; } + link_adjust.hdcp2.use_fw_locality_check = + (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable); + link_adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback; schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); @@ -534,6 +538,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct dc_sink *sink = NULL; bool link_is_hdcp14 = false; + const struct dc *dc = aconnector->dc_link->dc; if (config->dpms_off) { hdcp_remove_display(hdcp_work, link_index, aconnector); @@ -575,6 +580,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->adjust.auth_delay = 2; link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS; link->adjust.hdcp1.disable = 0; + link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable); + link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback; hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, @@ -786,15 +793,8 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, ddc_funcs->read_i2c = lp_read_i2c; ddc_funcs->write_dpcd = lp_write_dpcd; ddc_funcs->read_dpcd = lp_read_dpcd; - - config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback; - if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) { - ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c; - ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux; - } else { - ddc_funcs->atomic_write_poll_read_i2c = NULL; - ddc_funcs->atomic_write_poll_read_aux = NULL; - } + ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c; + ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux; memset(hdcp_work[i].aconnector, 0, sizeof(struct amdgpu_dm_connector *) * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index cc21337a182f..ac98c746c3de 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -131,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->serial_number = edid_buf->serial; edid_caps->manufacture_week = edid_buf->mfg_week; edid_caps->manufacture_year = edid_buf->mfg_year; + edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL); drm_edid_get_monitor_name(edid_buf, edid_caps->display_name, @@ -997,8 +998,8 @@ enum dc_edid_status dm_helpers_read_local_edid( struct amdgpu_dm_connector *aconnector = link->priv; struct drm_connector *connector = &aconnector->base; struct i2c_adapter *ddc; - int retry = 3; - enum dc_edid_status edid_status; + int retry = 25; + enum dc_edid_status edid_status = EDID_NO_RESPONSE; const struct drm_edid *drm_edid; const struct edid *edid; @@ -1028,7 +1029,7 @@ enum dc_edid_status dm_helpers_read_local_edid( } if (!drm_edid) - return EDID_NO_RESPONSE; + continue; edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() if (!edid || @@ -1046,7 +1047,7 @@ enum dc_edid_status dm_helpers_read_local_edid( &sink->dc_edid, &sink->edid_caps); - } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0); + } while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0); if (edid_status != EDID_OK) DRM_ERROR("EDID err: %d, on connector: %s", diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index a1c722112c22..0a2a3f233a0e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -476,6 +476,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); int src; struct list_head *hnd_list_h; struct list_head *hnd_list_l; @@ -512,6 +513,9 @@ void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_disable(dev); } void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) @@ -537,6 +541,7 @@ void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); int src; struct list_head *hnd_list_h, *hnd_list_l; unsigned long irq_table_flags; @@ -557,6 +562,9 @@ void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); } /* @@ -893,6 +901,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct drm_connector_list_iter iter; int irq_type; int i; + bool use_polling = false; /* First, clear all hpd and hpdrx interrupts */ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) { @@ -906,6 +915,8 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct amdgpu_dm_connector *amdgpu_dm_connector; const struct dc_link *dc_link; + use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; @@ -947,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); + + if (use_polling) + drm_kms_helper_poll_init(dev); } /** @@ -997,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) 
} } drm_connector_list_iter_end(&iter); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_fini(dev); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 5e92eaa67aa3..dbd1da4d85d3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -884,26 +884,28 @@ struct dsc_mst_fairness_params { }; #if defined(CONFIG_DRM_AMD_DC_FP) -static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) +static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn) { - u8 link_coding_cap; - uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; + uint64_t effective_kbps = (uint64_t)kbps; - link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); - if (link_coding_cap == DP_128b_132b_ENCODING) - fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; + if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps + effective_kbps *= 1006; + effective_kbps = div_u64(effective_kbps, 1000); + } - return fec_overhead_multiplier_x1000; + return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000)); } -static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) +static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin) { - u64 peak_kbps = kbps; + uint64_t pbn_effective = (uint64_t)pbn; + + if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn + pbn_effective *= (1000000 / PEAK_FACTOR_X1000); + else + pbn_effective *= 1000; - peak_kbps *= 1006; - peak_kbps *= fec_overhead_multiplier_x1000; - peak_kbps = div_u64(peak_kbps, 1000 * 1000); - return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); + return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64); } static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, @@ -974,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; - kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); + kbps = pbn_to_kbps(pbn, false); dc_dsc_compute_config( param.sink->ctx->dc->res_pool->dscs[0], ¶m.sink->dsc_caps.dsc_dec_caps, @@ -1003,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, int link_timeslots_used; int fair_pbn_alloc; int ret = 0; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = - kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; + kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -1104,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state, int next_index; int remaining_to_try = 0; int ret; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); int var_pbn; for (i = 0; i < count; i++) { @@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); var_pbn = vars[next_index].pbn; - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, 
true); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1197,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int count = 0; int i, k, ret; bool debugfs_overwrite = false; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); struct drm_connector_state *new_conn_state; memset(params, 0, sizeof(params)); @@ -1278,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, @@ -1300,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1308,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (ret < 0) return ret; } else { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1763,18 +1762,6 @@ clean_exit: return ret; } -static uint32_t kbps_from_pbn(unsigned int pbn) -{ - uint64_t kbps = (uint64_t)pbn; - - kbps *= (1000000 / PEAK_FACTOR_X1000); - kbps *= 8; - kbps *= 54; - kbps /= 64; - - return (uint32_t)kbps; -} - static bool is_dsc_common_config_possible(struct dc_stream_state *stream, struct dc_dsc_bw_range *bw_range) { @@ -1873,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( dc_link_get_highest_encoding_format(stream->link)); cur_link_settings = stream->link->verified_link_cap; root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); - virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); + virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true); /* pick the end to end bw bottleneck */ end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); @@ -1926,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( immediate_upstream_port = aconnector->mst_output_port->parent->port_parent; if (immediate_upstream_port) { - virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); + virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true); virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); } else { /* For topology LCT 1 case - only one mstb*/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index e027798ece03..2e3ee78999d9 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -37,6 +37,7 @@ #include "amdgpu_display.h" #include "amdgpu_dm_trace.h" #include "amdgpu_dm_plane.h" +#include "amdgpu_dm_colorop.h" #include "gc/gc_11_0_0_offset.h" #include "gc/gc_11_0_0_sh_mask.h" @@ -1782,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane, return 0; } +#else + +#define MAX_COLOR_PIPELINES 5 + +static int +dm_plane_init_colorops(struct drm_plane *plane) +{ + struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES]; + struct drm_device *dev = plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = adev->dm.dc; + int len = 0; + int ret; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return 0; + + /* initialize pipeline */ + if (dc->ctx->dce_version >= DCN_VERSION_3_0) { + ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]); + if (ret) { + drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n", + plane->base.id, ret); + return ret; + } + len++; + + /* Create COLOR_PIPELINE property and attach */ + drm_plane_create_color_pipeline_property(plane, pipelines, len); + } + + return 0; +} #endif static const struct drm_plane_funcs dm_plane_funcs = { @@ -1890,7 +1924,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, #ifdef AMD_PRIVATE_COLOR dm_atomic_plane_attach_color_mgmt_properties(dm, plane); +#else + res = dm_plane_init_colorops(plane); + if (res) + return res; #endif + /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c index 80704d709e44..da94e3544b65 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -162,7 +162,7 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) if (link) { link->dc->link_srv->edp_setup_replay(link, stream); - link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total); + link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total, 0); DRM_DEBUG_DRIVER("Enabling replay...\n"); link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL); return true; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index dc943abd6dba..7277ed21552f 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -36,7 +36,7 @@ DC_LIBS += dcn30 DC_LIBS += dcn301 DC_LIBS += dcn31 DC_LIBS += dml -DC_LIBS += dml2 +DC_LIBS += dml2_0 DC_LIBS += soc_and_ip_translator endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 154fd2c18e88..d1471f34e419 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp, ATOM_OBJECT *object); static struct device_id device_type_from_device_id(uint16_t device_id); static uint32_t signal_to_ss_id(enum as_signal_type signal); -static uint32_t get_support_mask_for_device_id(struct device_id device_id); +static uint32_t get_support_mask_for_device_id( + enum dal_device_type device_type, + uint32_t enum_id); static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record( struct bios_parser *bp, ATOM_OBJECT *object); @@ -441,6 
+443,7 @@ static enum bp_result get_firmware_info_v1_4( le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10; info->pll_info.max_output_pxl_clk_pll_frequency = le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10; + info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10; if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support) /* Since there is no information on the SS, report conservative @@ -497,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1( info->external_clock_source_frequency_for_dp = le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10; info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level; + info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10; /* There should be only one entry in the SS info table for Memory Clock */ @@ -736,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control( return bp->cmd_tbl.transmitter_control(bp, cntl); } +static enum bp_result bios_parser_select_crtc_source( + struct dc_bios *dcb, + struct bp_crtc_source_select *bp_params) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + + if (!bp->cmd_tbl.select_crtc_source) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.select_crtc_source(bp, bp_params); +} + static enum bp_result bios_parser_encoder_control( struct dc_bios *dcb, struct bp_encoder_control *cntl) { struct bios_parser *bp = BP_FROM_DCB(dcb); + if (cntl->engine_id == ENGINE_ID_DACA) { + if (!bp->cmd_tbl.dac1_encoder_control) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.dac1_encoder_control( + bp, cntl->action == ENCODER_CONTROL_ENABLE, + cntl->pixel_clock, ATOM_DAC1_PS2); + } else if (cntl->engine_id == ENGINE_ID_DACB) { + if (!bp->cmd_tbl.dac2_encoder_control) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.dac2_encoder_control( + bp, cntl->action == ENCODER_CONTROL_ENABLE, + cntl->pixel_clock, ATOM_DAC1_PS2); + } + if (!bp->cmd_tbl.dig_encoder_control) return BP_RESULT_FAILURE; return bp->cmd_tbl.dig_encoder_control(bp, cntl); } +static enum bp_result bios_parser_dac_load_detection( + struct dc_bios *dcb, + enum engine_id engine_id, + enum dal_device_type device_type, + uint32_t enum_id) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + struct dc_context *ctx = dcb->ctx; + struct bp_load_detection_parameters bp_params = {0}; + enum bp_result bp_result; + uint32_t bios_0_scratch; + uint32_t device_id_mask = 0; + + bp_params.engine_id = engine_id; + bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id); + + if (engine_id != ENGINE_ID_DACA && + engine_id != ENGINE_ID_DACB) + return BP_RESULT_UNSUPPORTED; + + if (!bp->cmd_tbl.dac_load_detection) + return BP_RESULT_UNSUPPORTED; + + if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT) + device_id_mask = ATOM_S0_CRT1_MASK; + else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT) + device_id_mask = ATOM_S0_CRT2_MASK; + else + return BP_RESULT_UNSUPPORTED; + + /* BIOS will write the detected devices to BIOS_SCRATCH_0, clear corresponding bit */ + bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0); + bios_0_scratch &= ~device_id_mask; + dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch); + + bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params); + + if (bp_result != BP_RESULT_OK) + return bp_result; + + bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0); + + if (bios_0_scratch & device_id_mask) + return BP_RESULT_OK; + + return BP_RESULT_FAILURE; +} + static enum bp_result bios_parser_adjust_pixel_clock( struct dc_bios *dcb, 
struct bp_adjust_pixel_clock_parameters *bp_params) @@ -858,7 +938,7 @@ static bool bios_parser_is_device_id_supported( { struct bios_parser *bp = BP_FROM_DCB(dcb); - uint32_t mask = get_support_mask_for_device_id(id); + uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id); return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0; } @@ -2149,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal) return clk_id_ss; } -static uint32_t get_support_mask_for_device_id(struct device_id device_id) +static uint32_t get_support_mask_for_device_id( + enum dal_device_type device_type, + uint32_t enum_id) { - enum dal_device_type device_type = device_id.device_type; - uint32_t enum_id = device_id.enum_id; - switch (device_type) { case DEVICE_TYPE_LCD: switch (enum_id) { @@ -2829,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = { .is_device_id_supported = bios_parser_is_device_id_supported, /* COMMANDS */ + .select_crtc_source = bios_parser_select_crtc_source, + .encoder_control = bios_parser_encoder_control, + .dac_load_detection = bios_parser_dac_load_detection, + .transmitter_control = bios_parser_transmitter_control, .enable_crtc = bios_parser_enable_crtc, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 04eb647acc4e..550a9f1d03f8 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1480,10 +1480,10 @@ static enum bp_result get_embedded_panel_info_v2_1( /* not provided by VBIOS */ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0; - info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo - & ATOM_HSYNC_POLARITY); - info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo - & ATOM_VSYNC_POLARITY); + info->lcd_timing.misc_info.H_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo & + ATOM_HSYNC_POLARITY); + info->lcd_timing.misc_info.V_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo & + ATOM_VSYNC_POLARITY); /* not provided by VBIOS */ info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 58e88778da7f..22457f417e65 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp); static void init_set_pixel_clock(struct bios_parser *bp); static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp); static void init_adjust_display_pll(struct bios_parser *bp); +static void init_select_crtc_source(struct bios_parser *bp); static void init_dac_encoder_control(struct bios_parser *bp); +static void init_dac_load_detection(struct bios_parser *bp); static void init_dac_output_control(struct bios_parser *bp); static void init_set_crtc_timing(struct bios_parser *bp); static void init_enable_crtc(struct bios_parser *bp); @@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp) init_set_pixel_clock(bp); init_enable_spread_spectrum_on_ppll(bp); init_adjust_display_pll(bp); + init_select_crtc_source(bp); init_dac_encoder_control(bp); + init_dac_load_detection(bp); init_dac_output_control(bp); init_set_crtc_timing(bp); init_enable_crtc(bp); @@ -1612,6 +1616,198 @@ static enum bp_result adjust_display_pll_v3( 
/******************************************************************************* ******************************************************************************** ** + ** SELECT CRTC SOURCE + ** + ******************************************************************************** + *******************************************************************************/ + +static enum bp_result select_crtc_source_v1( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); +static enum bp_result select_crtc_source_v2( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); +static enum bp_result select_crtc_source_v3( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); + +static void init_select_crtc_source(struct bios_parser *bp) +{ + switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) { + case 1: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v1; + break; + case 2: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v2; + break; + case 3: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v3; + break; + default: + bp->cmd_tbl.select_crtc_source = NULL; + break; + } +} + +static enum bp_result select_crtc_source_v1( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PS_ALLOCATION params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + switch (bp_params->engine_id) { + case ENGINE_ID_DACA: + params.ucDevice = ATOM_DEVICE_CRT1_INDEX; + break; + case ENGINE_ID_DACB: + params.ucDevice = ATOM_DEVICE_CRT2_INDEX; + break; + default: + return BP_RESULT_BADINPUT; + } + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + +static bool select_crtc_source_v2_encoder_id( + enum engine_id engine_id, uint8_t *out_encoder_id) +{ + uint8_t encoder_id = 0; + + switch (engine_id) { + case ENGINE_ID_DIGA: + encoder_id = ASIC_INT_DIG1_ENCODER_ID; + break; + case ENGINE_ID_DIGB: + encoder_id = ASIC_INT_DIG2_ENCODER_ID; + break; + case ENGINE_ID_DIGC: + encoder_id = ASIC_INT_DIG3_ENCODER_ID; + break; + case ENGINE_ID_DIGD: + encoder_id = ASIC_INT_DIG4_ENCODER_ID; + break; + case ENGINE_ID_DIGE: + encoder_id = ASIC_INT_DIG5_ENCODER_ID; + break; + case ENGINE_ID_DIGF: + encoder_id = ASIC_INT_DIG6_ENCODER_ID; + break; + case ENGINE_ID_DIGG: + encoder_id = ASIC_INT_DIG7_ENCODER_ID; + break; + case ENGINE_ID_DACA: + encoder_id = ASIC_INT_DAC1_ENCODER_ID; + break; + case ENGINE_ID_DACB: + encoder_id = ASIC_INT_DAC2_ENCODER_ID; + break; + default: + return false; + } + + *out_encoder_id = encoder_id; + return true; +} + +static bool select_crtc_source_v2_encoder_mode( + enum signal_type signal_type, uint8_t *out_encoder_mode) +{ + uint8_t encoder_mode = 0; + + switch (signal_type) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + encoder_mode = ATOM_ENCODER_MODE_DVI; + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + encoder_mode = ATOM_ENCODER_MODE_HDMI; + break; + case SIGNAL_TYPE_LVDS: + encoder_mode = ATOM_ENCODER_MODE_LVDS; + break; + case SIGNAL_TYPE_RGB: + encoder_mode = ATOM_ENCODER_MODE_CRT; + break; + case SIGNAL_TYPE_DISPLAY_PORT: + encoder_mode = ATOM_ENCODER_MODE_DP; + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + encoder_mode = ATOM_ENCODER_MODE_DP_MST; + break; + case SIGNAL_TYPE_EDP: + encoder_mode = ATOM_ENCODER_MODE_DP; + break; + default: + return false; + } + + *out_encoder_mode = encoder_mode; + return true; +} + +static enum 
bp_result select_crtc_source_v2( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PARAMETERS_V3 params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + if (!select_crtc_source_v2_encoder_id( + bp_params->engine_id, + ¶ms.ucEncoderID)) + return BP_RESULT_BADINPUT; + if (!select_crtc_source_v2_encoder_mode( + bp_params->sink_signal, + ¶ms.ucEncodeMode)) + return BP_RESULT_BADINPUT; + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + +static enum bp_result select_crtc_source_v3( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PARAMETERS_V3 params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + if (!select_crtc_source_v2_encoder_id( + bp_params->engine_id, + ¶ms.ucEncoderID)) + return BP_RESULT_BADINPUT; + if (!select_crtc_source_v2_encoder_mode( + bp_params->sink_signal, + ¶ms.ucEncodeMode)) + return BP_RESULT_BADINPUT; + + params.ucDstBpc = bp_params->bit_depth; + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + +/******************************************************************************* + ******************************************************************************** + ** ** DAC ENCODER CONTROL ** ******************************************************************************** @@ -1711,6 +1907,96 @@ static enum bp_result dac2_encoder_control_v1( /******************************************************************************* ******************************************************************************** ** + ** DAC LOAD DETECTION + ** + ******************************************************************************** + *******************************************************************************/ + +static enum bp_result dac_load_detection_v1( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); + +static enum bp_result dac_load_detection_v3( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); + +static void init_dac_load_detection(struct bios_parser *bp) +{ + switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) { + case 1: + case 2: + bp->cmd_tbl.dac_load_detection = dac_load_detection_v1; + break; + case 3: + default: + bp->cmd_tbl.dac_load_detection = dac_load_detection_v3; + break; + } +} + +static void dac_load_detect_prepare_params( + struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params, + enum engine_id engine_id, + uint16_t device_id, + uint8_t misc) +{ + uint8_t dac_type = ENGINE_ID_DACA; + + if (engine_id == ENGINE_ID_DACB) + dac_type = ATOM_DAC_B; + + params->sDacload.usDeviceID = cpu_to_le16(device_id); + params->sDacload.ucDacType = dac_type; + params->sDacload.ucMisc = misc; +} + +static enum bp_result dac_load_detection_v1( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + DAC_LOAD_DETECTION_PS_ALLOCATION params; + + dac_load_detect_prepare_params( + ¶ms, + bp_params->engine_id, + bp_params->device_id, + 0); + + if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params)) + result = BP_RESULT_OK; + + return result; +} + +static enum bp_result dac_load_detection_v3( + struct bios_parser *bp, + struct bp_load_detection_parameters 
*bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + DAC_LOAD_DETECTION_PS_ALLOCATION params; + + uint8_t misc = 0; + + if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT || + bp_params->device_id == ATOM_DEVICE_TV1_SUPPORT) + misc = DAC_LOAD_MISC_YPrPb; + + dac_load_detect_prepare_params( + ¶ms, + bp_params->engine_id, + bp_params->device_id, + misc); + + if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params)) + result = BP_RESULT_OK; + + return result; +} + +/******************************************************************************* + ******************************************************************************** + ** ** DAC OUTPUT CONTROL ** ******************************************************************************** diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h index ad533775e724..e89b1ba0048b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h @@ -52,6 +52,9 @@ struct cmd_tbl { enum bp_result (*adjust_display_pll)( struct bios_parser *bp, struct bp_adjust_pixel_clock_parameters *bp_params); + enum bp_result (*select_crtc_source)( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); enum bp_result (*dac1_encoder_control)( struct bios_parser *bp, bool enable, @@ -68,6 +71,9 @@ struct cmd_tbl { enum bp_result (*dac2_output_control)( struct bios_parser *bp, bool enable); + enum bp_result (*dac_load_detection)( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); enum bp_result (*set_crtc_timing)( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 9e63fa72101c..db687a13174d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -509,16 +509,16 @@ void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_b regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + if (regs_and_bypass->dppclk_bypass > 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index b315ed91e010..3a881451e9da 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -40,7 +40,7 @@ #include "dm_helpers.h" #include "dc_dmub_srv.h" - +#include "reg_helper.h" #include 
"logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ @@ -48,9 +48,43 @@ #include "link_service.h" +#define MAX_INSTANCE 7 +#define MAX_SEGMENT 8 + +struct IP_BASE_INSTANCE { + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE { + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + +static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } }, + { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } }, + { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } }, + { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } }, + { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } }, + { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } }; + +#define regCLK1_CLK0_CURRENT_CNT 0x0314 +#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK1_CURRENT_CNT 0x0315 +#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK2_CURRENT_CNT 0x0316 +#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK3_CURRENT_CNT 0x0317 +#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK4_CURRENT_CNT 0x0318 +#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK5_CURRENT_CNT 0x0319 +#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0 + #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) +#define REG(reg_name) \ + (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + #define UNSUPPORTED_DCFCLK 10000000 #define MIN_DPP_DISP_CLK 100000 @@ -245,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } +static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + // read dtbclk + internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT); + + // read dcfclk + internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT); + + // read dppclk + internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT); + + // read dprefclk + internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT); + + // read dispclk + internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT); +} + static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { + struct dcn35_clk_internal internal = {0}; + + dcn315_dump_clk_registers_internal(&internal, clk_mgr_base); + + regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10; + regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10; + regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10; + regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10; + regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; return; } @@ -594,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn315_update_clocks, - .init_clocks = dcn31_init_clocks, + .init_clocks = dcn315_init_clocks, .enable_pme_wa = dcn315_enable_pme_wa, .are_clock_states_equal = dcn31_are_clock_states_equal, .notify_wm_ranges = dcn315_notify_wm_ranges }; extern struct clk_mgr_funcs dcn3_fpga_funcs; +void dcn315_init_clocks(struct clk_mgr *clk_mgr) +{ + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + struct clk_mgr_dcn315 *clk_mgr_dcn315 = 
TO_CLK_MGR_DCN315(clk_mgr_int); + struct clk_log_info log_info = {0}; + + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; + + dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info); + clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000; +} + void dcn315_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn315 *clk_mgr, @@ -661,6 +743,7 @@ void dcn315_clk_mgr_construct( /* Saved clocks configured at boot for debug purposes */ dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000; clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h index ac36ddf5dd1a..642ae3d4a790 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h @@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg); +void dcn315_init_clocks(struct clk_mgr *clk_mgr); void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); #endif //__DCN315_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index b11383fba35f..dfd0c9505af0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps); if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz) new_clocks->ref_dtbclk_khz = 600000; + else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000) + new_clocks->ref_dtbclk_khz = 0; /* * if it is safe to lower, but we are already in the lower state, we don't have to do anything @@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT); - if (actual_dtbclk) { + if (actual_dtbclk > 590000) { clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz; clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; } @@ -633,16 +635,16 @@ static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + if (regs_and_bypass->dppclk_bypass > 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dispclk_bypass < 0 || 
regs_and_bypass->dispclk_bypass > 4) + if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { @@ -1293,6 +1295,35 @@ static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr, dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz); } +static unsigned int dcn35_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + unsigned int num_clk_levels; + + switch (clk_type) { + case CLK_TYPE_DISPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return num_clk_levels ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dispclk; + case CLK_TYPE_DPPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; + return num_clk_levels ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dppclk; + case CLK_TYPE_DSCCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return num_clk_levels ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 : + clk_mgr->base.boot_snapshot.dispclk / 3; + default: + break; + } + + return 0; +} + static struct clk_mgr_funcs dcn35_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, @@ -1304,6 +1335,7 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, + .get_max_clock_khz = dcn35_get_max_clock_khz, }; struct clk_mgr_funcs dcn35_fpga_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5f2d5638c819..8be9cbd43e18 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -83,7 +83,7 @@ #include "hw_sequencer_private.h" #if defined(CONFIG_DRM_AMD_DC_FP) -#include "dml2/dml2_internal_types.h" +#include "dml2_0/dml2_internal_types.h" #include "soc_and_ip_translator.h" #endif @@ -148,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build"; /* Private functions */ -static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) +static inline void elevate_update_type( + struct surface_update_descriptor *descriptor, + enum surface_update_type new_type, + enum dc_lock_descriptor new_locks +) { - if (new > *original) - *original = new; + if (new_type > descriptor->update_type) + descriptor->update_type = new_type; + + descriptor->lock_descriptor |= new_locks; } static void destroy_links(struct dc *dc) @@ -297,6 +303,7 @@ static bool create_links( link->link_id.id = CONNECTOR_ID_VIRTUAL; link->link_id.enum_id = ENUM_ID_1; link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED; link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); if (!link->link_enc) { @@ -493,9 +500,14 @@ bool 
dc_stream_adjust_vmin_vmax(struct dc *dc, 1, *adjust); stream->adjust.timing_adjust_pending = false; + + if (dc->hwss.notify_cursor_offload_drr_update) + dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream); + return true; } } + return false; } @@ -1143,8 +1155,8 @@ static bool dc_construct(struct dc *dc, /* set i2c speed if not done by the respective dcnxxx__resource.c */ if (dc->caps.i2c_speed_in_khz_hdcp == 0) dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; - if (dc->caps.max_optimizable_video_width == 0) - dc->caps.max_optimizable_video_width = 5120; + if (dc->check_config.max_optimizable_video_width == 0) + dc->check_config.max_optimizable_video_width = 5120; dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); if (!dc->clk_mgr) goto fail; @@ -2135,6 +2147,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c if (!dcb->funcs->is_accelerated_mode(dcb)) { disable_vbios_mode_if_required(dc, context); dc->hwss.enable_accelerated_mode(dc, context); + } else if (get_seamless_boot_stream_count(dc->current_state) > 0) { + /* If the previous Stream still retains the apply seamless boot flag, + * it means the OS has not actually performed a flip yet. + * At this point, if we receive dc_commit_streams again, we should + * once more check whether the actual HW timing matches what the OS + * has provided + */ + disable_vbios_mode_if_required(dc, context); } if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) { @@ -2158,8 +2178,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c */ if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); if (dc->hwss.update_dsc_pg) dc->hwss.update_dsc_pg(dc, context, false); @@ -2188,8 +2208,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); } + for (i = 0; i < dc->current_state->stream_count; i++) + dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false); + result = dc->hwss.apply_ctx_to_hw(dc, context); + for (i = 0; i < context->stream_count; i++) + dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true); + if (result != DC_OK) { /* Application of dc_state to hardware stopped. 
*/ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; @@ -2229,8 +2255,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.commit_subvp_config(dc, context); if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; @@ -2645,47 +2671,50 @@ static bool is_surface_in_context( return false; } -static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u) +static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; - enum surface_update_type update_type = UPDATE_TYPE_FAST; + struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; if (!u->plane_info) - return UPDATE_TYPE_FAST; + return update_type; + + // `plane_info` present means at least `STREAM` lock is required + elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); if (u->plane_info->color_space != u->surface->color_space) { update_flags->bits.color_space_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { update_flags->bits.horizontal_mirror_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->plane_info->rotation != u->surface->rotation) { update_flags->bits.rotation_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->plane_info->format != u->surface->format) { update_flags->bits.pixel_format_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->plane_info->stereo_format != u->surface->stereo_format) { update_flags->bits.stereo_format_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { update_flags->bits.per_pixel_alpha_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { update_flags->bits.global_alpha_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->plane_info->dcc.enable != u->surface->dcc.enable @@ -2697,7 +2726,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, * recalculate stutter period. 
*/ update_flags->bits.dcc_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (resource_pixel_format_to_bpp(u->plane_info->format) != @@ -2706,30 +2735,41 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, * and DML calculation */ update_flags->bits.bpp_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { update_flags->bits.plane_size_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } + const struct dc_tiling_info *tiling = &u->plane_info->tiling_info; - if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, - sizeof(struct dc_tiling_info)) != 0) { + if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) { update_flags->bits.swizzle_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); - - /* todo: below are HW dependent, we should add a hook to - * DCE/N resource and validated there. - */ - if (!dc->debug.skip_full_updated_if_possible) { - /* swizzled mode requires RQ to be setup properly, - * thus need to run DML to calculate RQ settings - */ - update_flags->bits.bandwidth_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); + + switch (tiling->gfxversion) { + case DcGfxVersion9: + case DcGfxVersion10: + case DcGfxVersion11: + if (tiling->gfx9.swizzle != DC_SW_LINEAR) { + update_flags->bits.bandwidth_change = 1; + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); + } + break; + case DcGfxAddr3: + if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) { + update_flags->bits.bandwidth_change = 1; + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); + } + break; + case DcGfxVersion7: + case DcGfxVersion8: + case DcGfxVersionUnknown: + default: + break; } } @@ -2737,14 +2777,18 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, return update_type; } -static enum surface_update_type get_scaling_info_update_type( - const struct dc *dc, +static struct surface_update_descriptor get_scaling_info_update_type( + const struct dc_check_config *check_config, const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; + struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; if (!u->scaling_info) - return UPDATE_TYPE_FAST; + return update_type; + + // `scaling_info` present means at least `STREAM` lock is required + elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); if (u->scaling_info->src_rect.width != u->surface->src_rect.width || u->scaling_info->src_rect.height != u->surface->src_rect.height @@ -2755,6 +2799,7 @@ static enum surface_update_type get_scaling_info_update_type( || u->scaling_info->scaling_quality.integer_scaling != u->surface->scaling_quality.integer_scaling) { update_flags->bits.scaling_change = 1; + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); if (u->scaling_info->src_rect.width > u->surface->src_rect.width || u->scaling_info->src_rect.height > 
u->surface->src_rect.height) @@ -2768,7 +2813,7 @@ static enum surface_update_type get_scaling_info_update_type( /* Making dst rect smaller requires a bandwidth change */ update_flags->bits.bandwidth_change = 1; - if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && + if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width && (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) /* Changing clip size of a large surface may result in MPC slice count change */ @@ -2780,123 +2825,109 @@ static enum surface_update_type get_scaling_info_update_type( || u->scaling_info->clip_rect.x != u->surface->clip_rect.x || u->scaling_info->clip_rect.y != u->surface->clip_rect.y || u->scaling_info->dst_rect.x != u->surface->dst_rect.x - || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) + || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) { + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); update_flags->bits.position_change = 1; + } - /* process every update flag before returning */ - if (update_flags->bits.clock_change - || update_flags->bits.bandwidth_change - || update_flags->bits.scaling_change) - return UPDATE_TYPE_FULL; - - if (update_flags->bits.position_change) - return UPDATE_TYPE_MED; - - return UPDATE_TYPE_FAST; + return update_type; } -static enum surface_update_type det_surface_update(const struct dc *dc, - const struct dc_surface_update *u) +static struct surface_update_descriptor det_surface_update( + const struct dc_check_config *check_config, + struct dc_surface_update *u) { - const struct dc_state *context = dc->current_state; - enum surface_update_type type; - enum surface_update_type overall_type = UPDATE_TYPE_FAST; + struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; union surface_update_flags *update_flags = &u->surface->update_flags; - if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { + if (u->surface->force_full_update) { update_flags->raw = 0xFFFFFFFF; - return UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); + return overall_type; } update_flags->raw = 0; // Reset all flags - type = get_plane_info_update_type(dc, u); - elevate_update_type(&overall_type, type); + struct surface_update_descriptor inner_type = get_plane_info_update_type(u); + + elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); - type = get_scaling_info_update_type(dc, u); - elevate_update_type(&overall_type, type); + inner_type = get_scaling_info_update_type(check_config, u); + elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); if (u->flip_addr) { update_flags->bits.addr_update = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { update_flags->bits.tmz_changed = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_FULL); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } } - if (u->in_transfer_func) + if (u->in_transfer_func) { update_flags->bits.in_transfer_func_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); + } - if (u->input_csc_color_matrix) + if (u->input_csc_color_matrix) { update_flags->bits.input_csc_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, 
LOCK_DESCRIPTOR_STREAM); + } - if (u->coeff_reduction_factor) + if (u->coeff_reduction_factor) { update_flags->bits.coeff_reduction_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } - if (u->gamut_remap_matrix) + if (u->gamut_remap_matrix) { update_flags->bits.gamut_remap_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } - if (u->blend_tf) + if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) { update_flags->bits.gamma_change = 1; - - if (u->gamma) { - enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; - - if (u->plane_info) - format = u->plane_info->format; - else - format = u->surface->format; - - if (dce_use_lut(format)) - update_flags->bits.gamma_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); } - if (u->lut3d_func || u->func_shaper) + if (u->lut3d_func || u->func_shaper) { update_flags->bits.lut_3d = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } if (u->hdr_mult.value) if (u->hdr_mult.value != u->surface->hdr_mult.value) { + // TODO: Should be fast? update_flags->bits.hdr_mult = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_MED); + elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->sdr_white_level_nits) if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { + // TODO: Should be fast? update_flags->bits.sdr_white_level_nits = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_FULL); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->cm2_params) { - if ((u->cm2_params->component_settings.shaper_3dlut_setting - != u->surface->mcm_shaper_3dlut_setting) - || (u->cm2_params->component_settings.lut1d_enable - != u->surface->mcm_lut1d_enable)) - update_flags->bits.mcm_transfer_function_enable_change = 1; - if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src - != u->surface->mcm_luts.lut3d_data.lut3d_src) + if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting + || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable + || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) { update_flags->bits.mcm_transfer_function_enable_change = 1; - } - if (update_flags->bits.in_transfer_func_change) { - type = UPDATE_TYPE_MED; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); + } } if (update_flags->bits.lut_3d && u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); - } - if (update_flags->bits.mcm_transfer_function_enable_change) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } - if (dc->debug.enable_legacy_fast_update && + if (check_config->enable_legacy_fast_update && (update_flags->bits.gamma_change || update_flags->bits.gamut_remap_change || update_flags->bits.input_csc_change || update_flags->bits.coeff_reduction_change)) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } return overall_type; } @@ -2924,32 +2955,26 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct 
dc_surface_upda } } -static enum surface_update_type check_update_surfaces_for_stream( - struct dc *dc, +static struct surface_update_descriptor check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status) + struct dc_stream_update *stream_update) { - int i; - enum surface_update_type overall_type = UPDATE_TYPE_FAST; - - if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc)) - overall_type = UPDATE_TYPE_FULL; - - if (stream_status == NULL || stream_status->plane_count != surface_count) - overall_type = UPDATE_TYPE_FULL; + struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; if (stream_update && stream_update->pending_test_pattern) { - overall_type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (stream_update && stream_update->hw_cursor_req) { - overall_type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } /* some stream updates require passive update */ if (stream_update) { + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + union stream_update_flags *su_flags = &stream_update->stream->update_flags; if ((stream_update->src.height != 0 && stream_update->src.width != 0) || @@ -2957,14 +2982,16 @@ static enum surface_update_type check_update_surfaces_for_stream( stream_update->integer_scaling_update) su_flags->bits.scaling = 1; - if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; if (stream_update->abm_level) su_flags->bits.abm_level = 1; - if (stream_update->dpms_off) + if (stream_update->dpms_off) { su_flags->bits.dpms_off = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK); + } if (stream_update->gamut_remap) su_flags->bits.gamut_remap = 1; @@ -2992,24 +3019,27 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->output_color_space) su_flags->bits.out_csc = 1; - if (su_flags->raw != 0) - overall_type = UPDATE_TYPE_FULL; + // TODO: Make each elevation explicit, as to not override fast stream in crct_timing_adjust + if (su_flags->raw) + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); - if (stream_update->output_csc_transform) + // Non-global cases + if (stream_update->output_csc_transform) { su_flags->bits.out_csc = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } - /* Output transfer function changes do not require bandwidth recalculation, - * so don't trigger a full update - */ - if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) { su_flags->bits.out_tf = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } } - for (i = 0 ; i < surface_count; i++) { - enum surface_update_type type = - det_surface_update(dc, &updates[i]); + for (int i = 0 ; i < surface_count; i++) { + struct surface_update_descriptor inner_type = + det_surface_update(check_config, &updates[i]); - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); } return 
overall_type; @@ -3020,44 +3050,18 @@ static enum surface_update_type check_update_surfaces_for_stream( * * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types */ -enum surface_update_type dc_check_update_surfaces_for_stream( - struct dc *dc, +struct surface_update_descriptor dc_check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status) + struct dc_stream_update *stream_update) { - int i; - enum surface_update_type type; - if (stream_update) stream_update->stream->update_flags.raw = 0; - for (i = 0; i < surface_count; i++) + for (size_t i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0; - type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); - if (type == UPDATE_TYPE_FULL) { - if (stream_update) { - uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; - stream_update->stream->update_flags.raw = 0xFFFFFFFF; - stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; - } - for (i = 0; i < surface_count; i++) - updates[i].surface->update_flags.raw = 0xFFFFFFFF; - } - - if (type == UPDATE_TYPE_FAST) { - // If there's an available clock comparator, we use that. - if (dc->clk_mgr->funcs->are_clock_states_equal) { - if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) - dc->optimized_required = true; - // Else we fallback to mem compare. - } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { - dc->optimized_required = true; - } - } - - return type; + return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update); } static struct dc_stream_status *stream_get_status( @@ -3388,7 +3392,11 @@ static void restore_planes_and_stream_state( for (i = 0; i < status->plane_count; i++) { dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]); } + + // refcount is persistent + struct kref temp_refcount = stream->refcount; *stream = scratch->stream_state; + stream->refcount = temp_refcount; } /** @@ -3426,6 +3434,13 @@ static void update_seamless_boot_flags(struct dc *dc, } } +static bool full_update_required_weak( + const struct dc *dc, + const struct dc_surface_update *srf_updates, + int surface_count, + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream); + /** * update_planes_and_stream_state() - The function takes planes and stream * updates as inputs and determines the appropriate update type. If update type @@ -3472,7 +3487,10 @@ static bool update_planes_and_stream_state(struct dc *dc, context = dc->current_state; update_type = dc_check_update_surfaces_for_stream( - dc, srf_updates, surface_count, stream_update, stream_status); + &dc->check_config, srf_updates, surface_count, stream_update).update_type; + if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) + update_type = UPDATE_TYPE_FULL; + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 
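The hunks above replace the single surface_update_type return with a surface_update_descriptor that pairs an update type with a lock descriptor, and elevate_update_type() now raises the type while OR-ing in lock bits. A minimal standalone sketch of that accumulation, with stand-in enum values chosen for illustration rather than the driver's real definitions:

/* Minimal sketch of the update-type/lock-descriptor accumulation; the enum
 * values below are illustrative stand-ins, not the driver's definitions. */
#include <stdio.h>

enum surface_update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

enum dc_lock_descriptor {
	LOCK_DESCRIPTOR_NONE   = 0,
	LOCK_DESCRIPTOR_STREAM = 1 << 0,
	LOCK_DESCRIPTOR_GLOBAL = 1 << 1,
	LOCK_DESCRIPTOR_LINK   = 1 << 2,
};

struct surface_update_descriptor {
	enum surface_update_type update_type;
	enum dc_lock_descriptor lock_descriptor;
};

/* Mirrors elevate_update_type(): the type only ever grows, the locks only accumulate. */
static void elevate(struct surface_update_descriptor *d,
		    enum surface_update_type new_type,
		    enum dc_lock_descriptor new_locks)
{
	if (new_type > d->update_type)
		d->update_type = new_type;
	d->lock_descriptor |= new_locks;
}

int main(void)
{
	struct surface_update_descriptor d = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };

	elevate(&d, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); /* e.g. plane_info present */
	elevate(&d, UPDATE_TYPE_MED,  LOCK_DESCRIPTOR_STREAM); /* e.g. color space change */
	elevate(&d, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); /* e.g. pixel format change */
	elevate(&d, UPDATE_TYPE_MED,  LOCK_DESCRIPTOR_STREAM); /* lower types never downgrade the result */

	printf("type=%d locks=0x%x\n", d.update_type, (unsigned int)d.lock_descriptor);
	return 0;
}

The caller in update_planes_and_stream_state then only consumes descriptor.update_type, combining it with full_update_required_weak() as shown in the next hunk.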
@@ -3504,6 +3522,16 @@ static bool update_planes_and_stream_state(struct dc *dc, } } + if (update_type == UPDATE_TYPE_FULL) { + if (stream_update) { + uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; + stream_update->stream->update_flags.raw = 0xFFFFFFFF; + stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; + } + for (i = 0; i < surface_count; i++) + srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF; + } + if (update_type >= update_surface_trace_level) update_surface_trace(dc, srf_updates, surface_count); @@ -4149,7 +4177,7 @@ static void commit_planes_for_stream(struct dc *dc, if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) if (top_pipe_to_program && top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (should_use_dmub_lock(stream->link)) { + if (should_use_dmub_inbox1_lock(dc, stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -4176,16 +4204,16 @@ static void commit_planes_for_stream(struct dc *dc, if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); dc->hwss.interdependent_update_lock(dc, context, true); } else { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); /* Lock the top pipe while updating plane addrs, since freesync requires * plane addr update event triggers to be synchronized. 
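The hunk above renames fams2_global_control_lock to dmub_hw_control_lock but keeps the acquire/release bracketing around pipe programming, with every optional hook guarded by a NULL check. A standalone sketch of that bracketing pattern, using stub types and a stub hook so it compiles on its own (the structs and the stub are illustrative, not driver code):

/* Illustrative acquire/release bracketing with NULL-guarded hooks, modeled on
 * the hunk above; struct dc and struct dc_state are opaque stand-ins here. */
#include <stdbool.h>
#include <stdio.h>

struct dc;
struct dc_state;

struct hw_sequencer_funcs {
	void (*dmub_hw_control_lock)(struct dc *dc, struct dc_state *ctx, bool lock);
	void (*interdependent_update_lock)(struct dc *dc, struct dc_state *ctx, bool lock);
};

static void dmub_hw_control_lock_stub(struct dc *dc, struct dc_state *ctx, bool lock)
{
	printf("dmub_hw_control_lock(%s)\n", lock ? "acquire" : "release");
}

/* Each optional hook is checked for NULL before the call, as in the hunk. */
static void bracket_programming(struct dc *dc, struct dc_state *ctx,
				const struct hw_sequencer_funcs *hwss, bool lock)
{
	if (hwss->dmub_hw_control_lock)
		hwss->dmub_hw_control_lock(dc, ctx, lock);
	if (hwss->interdependent_update_lock)
		hwss->interdependent_update_lock(dc, ctx, lock);
}

int main(void)
{
	struct hw_sequencer_funcs hwss = { dmub_hw_control_lock_stub, NULL };

	bracket_programming(NULL, NULL, &hwss, true);  /* before programming plane addresses */
	/* ... program pipes here ... */
	bracket_programming(NULL, NULL, &hwss, false); /* after programming */
	return 0;
}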
@@ -4228,9 +4256,8 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); - + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); return; } @@ -4419,7 +4446,7 @@ static void commit_planes_for_stream(struct dc *dc, top_pipe_to_program->stream_res.tg, CRTC_STATE_VACTIVE); - if (should_use_dmub_lock(stream->link)) { + if (should_use_dmub_inbox1_lock(dc, stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -4467,13 +4494,13 @@ static void commit_planes_for_stream(struct dc *dc, if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); } else { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); } // Fire manual trigger only when bottom plane is flipped @@ -4489,6 +4516,8 @@ static void commit_planes_for_stream(struct dc *dc, pipe_ctx->plane_state->skip_manual_trigger) continue; + if (dc->hwss.program_cursor_offload_now) + dc->hwss.program_cursor_offload_now(dc, pipe_ctx); if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); } @@ -4994,7 +5023,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update, } } -static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) +static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count) { int i; @@ -5035,18 +5064,44 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_ return false; } -static bool full_update_required(struct dc *dc, - struct dc_surface_update *srf_updates, +static bool full_update_required_weak( + const struct dc *dc, + const struct dc_surface_update *srf_updates, int surface_count, - struct dc_stream_update *stream_update, - struct dc_stream_state *stream) + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) { - - int i; - struct dc_stream_status *stream_status; const struct dc_state *context = dc->current_state; + if (srf_updates) + for (int i = 0; i < surface_count; i++) + if (!is_surface_in_context(context, srf_updates[i].surface)) + return true; - for (i = 0; i < surface_count; i++) { + if (stream) { + const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream); + if (stream_status == NULL || stream_status->plane_count != surface_count) + return true; + } + if (dc->idle_optimizations_allowed) + return true; + + if (dc_can_clear_cursor_limit(dc)) + return true; + + return false; +} + +static bool full_update_required( + const struct dc *dc, + const struct dc_surface_update *srf_updates, + int surface_count, + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) 
+{ + if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) + return true; + + for (int i = 0; i < surface_count; i++) { if (srf_updates && (srf_updates[i].plane_info || srf_updates[i].scaling_info || @@ -5062,8 +5117,7 @@ static bool full_update_required(struct dc *dc, srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || (srf_updates[i].cm2_params && (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting || - srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) || - !is_surface_in_context(context, srf_updates[i].surface))) + srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)))) return true; } @@ -5099,26 +5153,16 @@ static bool full_update_required(struct dc *dc, stream_update->hw_cursor_req)) return true; - if (stream) { - stream_status = dc_stream_get_status(stream); - if (stream_status == NULL || stream_status->plane_count != surface_count) - return true; - } - if (dc->idle_optimizations_allowed) - return true; - - if (dc_can_clear_cursor_limit(dc)) - return true; - return false; } -static bool fast_update_only(struct dc *dc, - struct dc_fast_update *fast_update, - struct dc_surface_update *srf_updates, +static bool fast_update_only( + const struct dc *dc, + const struct dc_fast_update *fast_update, + const struct dc_surface_update *srf_updates, int surface_count, - struct dc_stream_update *stream_update, - struct dc_stream_state *stream) + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) { return fast_updates_exist(fast_update, surface_count) && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); @@ -5181,7 +5225,7 @@ static bool update_planes_and_stream_v2(struct dc *dc, commit_minimal_transition_state_in_dc_update(dc, context, stream, srf_updates, surface_count); - if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { + if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, surface_count, @@ -5224,7 +5268,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc, stream_update); if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && - !dc->debug.enable_legacy_fast_update) + !dc->check_config.enable_legacy_fast_update) commit_planes_for_stream_fast(dc, srf_updates, surface_count, @@ -5350,7 +5394,8 @@ bool dc_update_planes_and_stream(struct dc *dc, * specially handle compatibility problems with transitions among those * features as they are now transparent to the new sequence. 
*/ - if (dc->ctx->dce_version >= DCN_VERSION_4_01) + if (dc->ctx->dce_version >= DCN_VERSION_4_01 || dc->ctx->dce_version == DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_21) ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); else @@ -5935,6 +5980,101 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, return true; } +bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits, + uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline) +{ + bool status = false; + struct dc *dc = link->ctx->dc; + union dmub_rb_cmd cmd; + uint8_t otg_inst = 0; + unsigned int panel_inst = 0; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + int i = 0; + + // get panel_inst + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return status; + + // get otg_inst + for (i = 0; i < MAX_PIPES; i++) { + if (res_ctx && + res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + pipe_ctx = &res_ctx->pipe_ctx[i]; + //TODO: refactor for multi edp support + break; + } + } + + if (pipe_ctx) + otg_inst = pipe_ctx->stream_res.tg->inst; + + // before enable smart power OLED, we need to call set pipe for DMUB to set ABM config + if (enable) { + if (dc->hwss.set_pipe && pipe_ctx) + dc->hwss.set_pipe(pipe_ctx); + } + + // fill in cmd + memset(&cmd, 0, sizeof(cmd)); + + cmd.smart_power_oled_enable.header.type = DMUB_CMD__SMART_POWER_OLED; + cmd.smart_power_oled_enable.header.sub_type = DMUB_CMD__SMART_POWER_OLED_ENABLE; + cmd.smart_power_oled_enable.header.payload_bytes = + sizeof(struct dmub_rb_cmd_smart_power_oled_enable_data) - sizeof(struct dmub_cmd_header); + cmd.smart_power_oled_enable.header.ret_status = 1; + cmd.smart_power_oled_enable.data.enable = enable; + cmd.smart_power_oled_enable.data.panel_inst = panel_inst; + cmd.smart_power_oled_enable.data.peak_nits = peak_nits; + cmd.smart_power_oled_enable.data.otg_inst = otg_inst; + cmd.smart_power_oled_enable.data.digfe_inst = link->link_enc->preferred_engine; + cmd.smart_power_oled_enable.data.digbe_inst = link->link_enc->transmitter; + + cmd.smart_power_oled_enable.data.debugcontrol = debug_control; + cmd.smart_power_oled_enable.data.triggerline = triggerline; + cmd.smart_power_oled_enable.data.fixed_max_cll = fixed_CLL; + + // send cmd + status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + return status; +} + +bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL) +{ + struct dc *dc = link->ctx->dc; + union dmub_rb_cmd cmd; + bool status = false; + unsigned int panel_inst = 0; + + // get panel_inst + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return status; + + // fill in cmd + memset(&cmd, 0, sizeof(cmd)); + + cmd.smart_power_oled_getmaxcll.header.type = DMUB_CMD__SMART_POWER_OLED; + cmd.smart_power_oled_getmaxcll.header.sub_type = DMUB_CMD__SMART_POWER_OLED_GETMAXCLL; + cmd.smart_power_oled_getmaxcll.header.payload_bytes = sizeof(cmd.smart_power_oled_getmaxcll.data); + cmd.smart_power_oled_getmaxcll.header.ret_status = 1; + + cmd.smart_power_oled_getmaxcll.data.input.panel_inst = panel_inst; + + // send cmd and wait for reply + status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); + + if (status) + *pCurrent_MaxCLL = 
cmd.smart_power_oled_getmaxcll.data.output.current_max_cll; + else + *pCurrent_MaxCLL = 0; + + return status; +} + uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, uint8_t dpia_port_index) { @@ -6349,7 +6489,7 @@ bool dc_is_cursor_limit_pending(struct dc *dc) return false; } -bool dc_can_clear_cursor_limit(struct dc *dc) +bool dc_can_clear_cursor_limit(const struct dc *dc) { uint32_t i; @@ -6378,3 +6518,576 @@ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, if (dc->hwss.get_underflow_debug_data) dc->hwss.get_underflow_debug_data(dc, tg, out_data); } + +void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, + struct power_features *out_data) +{ + out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support; + out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching; +} + +bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state) +{ + struct dc_state *context; + struct resource_context *res_ctx; + int i; + + if (!dc || !dc->current_state || !state) { + if (state) + state->state_valid = false; + return false; + } + + /* Initialize the state structure */ + memset(state, 0, sizeof(struct dc_register_software_state)); + + context = dc->current_state; + res_ctx = &context->res_ctx; + + /* Count active pipes and streams */ + state->active_pipe_count = 0; + state->active_stream_count = context->stream_count; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (res_ctx->pipe_ctx[i].stream) + state->active_pipe_count++; + } + + /* Capture HUBP programming state for each pipe */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + state->hubp[i].valid_stream = false; + if (!pipe_ctx->stream) + continue; + + state->hubp[i].valid_stream = true; + + /* HUBP register programming variables */ + if (pipe_ctx->stream_res.tg) + state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst; + + state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 1 : 0; + + state->hubp[i].valid_plane_state = false; + if (pipe_ctx->plane_state) { + state->hubp[i].valid_plane_state = true; + state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format; + state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation; + state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0; + + /* Surface size */ + if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) { + state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width; + state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height; + } + + /* Viewport dimensions from scaler data */ + if (pipe_ctx->plane_state->src_rect.width > 0) { + state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width; + state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height; + state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x; + state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y; + } + + /* DCC settings */ + state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 
1 : 0; + state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks; + state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk; + + /* Surface pitch */ + state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch; + state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch; + state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch; + state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c; + + /* Surface addresses - primary */ + state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part; + state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part; + state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part; + state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part; + + /* TMZ settings */ + state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface; + state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface; + + /* Tiling configuration */ + state->hubp[i].min_dc_gfx_version9 = false; + if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) { + state->hubp[i].min_dc_gfx_version9 = true; + state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle; + state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes; + state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks; + state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave; + state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines; + state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se; + state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs; + } + } + + /* DML Request Size Configuration */ + if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) { + state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size; + state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size; + state->hubp[i].rq_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size; + state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size; + state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size; + state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size; + state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height; + state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear; + } + + /* Chroma request size configuration */ + if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) { + state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size; + state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size; + state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size; + state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size; + state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size; + state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size; + state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height; + state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear; + } + + /* DML expansion modes */ + state->hubp[i].drq_expansion_mode 
= pipe_ctx->rq_regs.drq_expansion_mode; + state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode; + state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode; + state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode; + + /* DML DLG parameters - nominal */ + state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank; + state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank; + state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip; + state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip; + + /* DML prefetch settings */ + state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch; + state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch; + state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c; + + /* TTU parameters */ + state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm; + state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm; + state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip; + state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank; + } + + /* Capture HUBBUB programming state */ + if (dc->res_pool->hubbub) { + /* Individual DET buffer sizes - software state variables that program DET registers */ + for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) { + uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb; + switch (i) { + case 0: + state->hubbub.det0_size = det_size; + break; + case 1: + state->hubbub.det1_size = det_size; + break; + case 2: + state->hubbub.det2_size = det_size; + break; + case 3: + state->hubbub.det3_size = det_size; + break; + } + } + + /* Compression buffer configuration - software state that programs COMPBUF_SIZE register */ + // TODO: Handle logic for legacy DCN pre-DCN401 + state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size; + } + + /* Capture DPP programming state for each pipe */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (!pipe_ctx->stream) + continue; + + state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 
1 : 0; + + if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) { + /* Access dscl_prog_data directly - this contains the actual software state used for register programming */ + struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data; + + /* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */ + state->dpp[i].recout_start_x = dscl_data->recout.x; + state->dpp[i].recout_start_y = dscl_data->recout.y; + state->dpp[i].recout_width = dscl_data->recout.width; + state->dpp[i].recout_height = dscl_data->recout.height; + + /* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */ + state->dpp[i].mpc_width = dscl_data->mpc_size.width; + state->dpp[i].mpc_height = dscl_data->mpc_size.height; + + /* DSCL mode - software state that programs SCL_MODE registers */ + state->dpp[i].dscl_mode = dscl_data->dscl_mode; + + /* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */ + state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio + state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio + + /* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */ + state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back + state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back + } + } + + /* Capture essential clock state for underflow analysis */ + if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) { + /* Core display clocks affecting bandwidth and timing */ + state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz; + + /* Per-pipe clock configuration - only capture what's essential */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + if (pipe_ctx->stream) { + /* Essential clocks that directly affect underflow risk */ + state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz; + state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10; + state->dccg.dppclk_enable[i] = 1; + + /* DP stream clock only for DP signals */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + state->dccg.dpstreamclk_enable[i] = 1; + } else { + state->dccg.dpstreamclk_enable[i] = 0; + } + } else { + /* Inactive pipe - no clocks */ + state->dccg.dppclk_khz[i] = 0; + state->dccg.pixclk_khz[i] = 0; + state->dccg.dppclk_enable[i] = 0; + if (i < 4) { + state->dccg.dpstreamclk_enable[i] = 0; + } + } + } + + /* DSC clock state - only when actually using DSC */ + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? 
&res_ctx->pipe_ctx[i] : NULL; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) { + state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */ + } else { + state->dccg.dscclk_khz[i] = 0; + } + } + + /* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */ + for (i = 0; i < 2; i++) { + state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */ + } + + } + + /* Capture essential DSC configuration for underflow analysis */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) { + /* DSC is enabled - capture essential configuration */ + state->dsc[i].dsc_clock_enable = 1; + + /* DSC configuration affecting bandwidth and timing */ + struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg; + state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h; + state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v; + state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel; + + /* OPP pipe source for DSC forwarding */ + if (pipe_ctx->stream_res.opp) { + state->dsc[i].dscrm_dsc_forward_enable = 1; + state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst; + } else { + state->dsc[i].dscrm_dsc_forward_enable = 0; + state->dsc[i].dscrm_dsc_opp_pipe_source = 0; + } + } else { + /* DSC not enabled - clear all fields */ + memset(&state->dsc[i], 0, sizeof(state->dsc[i])); + } + } + + /* Capture MPC programming state - comprehensive register field coverage */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream) { + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + + /* MPCC blending tree and mode control - capture actual blend configuration */ + state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0; + state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0; + state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0; + state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */ + state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value; + state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 
255 : 0; + state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */ + state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */ + + /* MPCC blending tree connections - capture tree topology */ + if (pipe_ctx->bottom_pipe) { + state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx; + } else { + state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */ + } + state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */ + + /* MPCC output gamma control - capture gamma programming */ + if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) { + state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */ + state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */ + state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */ + } else { + state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */ + state->mpc.mpcc_ogam_select[i] = 0; + state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */ + } + + /* MPCC pipe assignment and operational status */ + if (pipe_ctx->stream_res.opp) { + state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst; + } else { + state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */ + } + + /* MPCC status indicators - active pipe state */ + state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */ + state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */ + + } else { + /* Pipe not active - set disabled/idle state for all fields */ + state->mpc.mpcc_mode[i] = 0; + state->mpc.mpcc_alpha_blend_mode[i] = 0; + state->mpc.mpcc_alpha_multiplied_mode[i] = 0; + state->mpc.mpcc_blnd_active_overlap_only[i] = 0; + state->mpc.mpcc_global_alpha[i] = 0; + state->mpc.mpcc_global_gain[i] = 0; + state->mpc.mpcc_bg_bpc[i] = 0; + state->mpc.mpcc_bot_gain_mode[i] = 0; + state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */ + state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */ + state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */ + state->mpc.mpcc_ogam_select[i] = 0; + state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */ + state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */ + state->mpc.mpcc_idle[i] = 1; /* Idle */ + state->mpc.mpcc_busy[i] = 0; /* Not busy */ + } + } + + /* Capture OPP programming state for each pipe - comprehensive register field coverage */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (!pipe_ctx->stream) + continue; + + if (pipe_ctx->stream_res.opp) { + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + + /* OPP Pipe Control */ + state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */ + + /* Display Pattern Generator (DPG) Control - 19 fields */ + if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { + state->opp[i].dpg_enable = 1; + } else { + /* Video mode - DPG disabled */ + state->opp[i].dpg_enable = 0; + } + + /* Format Control (FMT) - 18 fields */ + state->opp[i].fmt_pixel_encoding = timing->pixel_encoding; + + /* Chroma subsampling mode based on pixel encoding */ + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { + state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */ + } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + state->opp[i].fmt_subsampling_mode = 2; /* 4:2:2 subsampling */ + } else { + state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */ + } + + state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 
1 : 0; + state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0; + + /* Dithering control based on bit depth */ + if (timing->display_color_depth < COLOR_DEPTH_121212) { + state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */ + state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */ + state->opp[i].fmt_spatial_dither_enable = 1; + state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */ + state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth; + state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */ + } else { + state->opp[i].fmt_spatial_dither_frame_counter_max = 0; + state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; + state->opp[i].fmt_spatial_dither_enable = 0; + state->opp[i].fmt_spatial_dither_mode = 0; + state->opp[i].fmt_spatial_dither_depth = 0; + state->opp[i].fmt_temporal_dither_enable = 0; + } + + /* Truncation control for bit depth reduction */ + if (timing->display_color_depth < COLOR_DEPTH_121212) { + state->opp[i].fmt_truncate_enable = 1; + state->opp[i].fmt_truncate_depth = timing->display_color_depth; + state->opp[i].fmt_truncate_mode = 0; /* Round mode */ + } else { + state->opp[i].fmt_truncate_enable = 0; + state->opp[i].fmt_truncate_depth = 0; + state->opp[i].fmt_truncate_mode = 0; + } + + /* Data clamping control */ + state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */ + state->opp[i].fmt_clamp_color_format = timing->pixel_encoding; + + /* Dynamic expansion for limited range content */ + if (timing->pixel_encoding != PIXEL_ENCODING_RGB) { + state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */ + state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */ + } else { + state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */ + state->opp[i].fmt_dynamic_exp_mode = 0; + } + + /* Legacy field for compatibility */ + state->opp[i].fmt_bit_depth_control = timing->display_color_depth; + + /* Output Buffer (OPPBUF) Control - 6 fields */ + state->opp[i].oppbuf_active_width = timing->h_addressable; + state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */ + + /* Multi-Stream Output (MSO) / ODM segmentation */ + if (pipe_ctx->next_odm_pipe) { + state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */ + state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */ + } else { + state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */ + state->opp[i].oppbuf_overlap_pixel_num = 0; + } + + /* 3D/Stereo control */ + if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) { + state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */ + state->opp[i].oppbuf_3d_vact_space2_size = 30; + } else { + state->opp[i].oppbuf_3d_vact_space1_size = 0; + state->opp[i].oppbuf_3d_vact_space2_size = 0; + } + + /* DSC Forward Config - 3 fields */ + if (timing->dsc_cfg.num_slices_h > 0) { + state->opp[i].dscrm_dsc_forward_enable = 1; + state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst; + state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */ + } else { + state->opp[i].dscrm_dsc_forward_enable = 0; + state->opp[i].dscrm_dsc_opp_pipe_source = 0; + state->opp[i].dscrm_dsc_forward_enable_status = 0; + } + } else { + /* No OPP resource - set all fields to disabled state */ + memset(&state->opp[i], 0, sizeof(state->opp[i])); + } + } + + /* Capture OPTC 
programming state for each pipe - comprehensive register field coverage */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (!pipe_ctx->stream) + continue; + + if (pipe_ctx->stream_res.tg) { + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + + state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst; + + /* OTG_CONTROL register - 5 fields */ + state->optc[i].otg_master_enable = 1; /* Active stream */ + state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */ + state->optc[i].otg_start_point_cntl = 0; /* Normal start */ + state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0; + state->optc[i].otg_out_mux = 0; /* Direct output */ + + /* OTG Horizontal Timing - 7 fields */ + state->optc[i].otg_h_total = timing->h_total; + state->optc[i].otg_h_blank_start = timing->h_addressable; + state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch; + state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch; + state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width; + state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1; + state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */ + + /* OTG Vertical Timing - 7 fields */ + state->optc[i].otg_v_total = timing->v_total; + state->optc[i].otg_v_blank_start = timing->v_addressable; + state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch; + state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch; + state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width; + state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1; + state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */ + + /* Initialize remaining core fields with appropriate defaults */ + // TODO: Update logic for accurate vtotal min/max + state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */ + state->optc[i].otg_v_total_min = timing->v_total - 50; + state->optc[i].otg_v_total_mid = timing->v_total; + + /* ODM configuration */ + // TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in single pipe + if (pipe_ctx->next_odm_pipe) { + state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0; + state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0; + state->optc[i].optc_num_of_input_segment = 1; /* 2 segments - 1 */ + } else { + state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0; + state->optc[i].optc_seg1_src_sel = 0; + state->optc[i].optc_num_of_input_segment = 0; /* Single segment */ + } + + /* DSC configuration */ + if (timing->dsc_cfg.num_slices_h > 0) { + state->optc[i].optc_dsc_mode = 1; /* DSC enabled */ + state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* Convert to bytes */ + state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h; + } else { + state->optc[i].optc_dsc_mode = 0; + state->optc[i].optc_dsc_bytes_per_pixel = 0; + state->optc[i].optc_dsc_slice_width = 0; + } + + /* Essential control fields */ + state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 
1 : 0; + state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0; + state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */ + state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */ + + /* Initialize other key fields to defaults */ + state->optc[i].optc_input_pix_clk_en = 1; + state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable; + state->optc[i].otg_vready_offset = 1; + state->optc[i].otg_vstartup_start = timing->v_addressable + 10; + state->optc[i].otg_vupdate_offset = 0; + state->optc[i].otg_vupdate_width = 5; + } else { + /* No timing generator resource - initialize all fields to 0 */ + memset(&state->optc[i], 0, sizeof(state->optc[i])); + } + } + + state->state_valid = true; + return true; +} + +void dc_log_preos_dmcub_info(const struct dc *dc) +{ + dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index d82b1cb467f4..e2763b60482a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -32,6 +32,13 @@ #include "resource.h" #include "dc_dmub_srv.h" #include "dc_state_priv.h" +#include "opp.h" +#include "dsc.h" +#include "dchubbub.h" +#include "dccg.h" +#include "abm.h" +#include "dcn10/dcn10_hubbub.h" +#include "dce/dmub_hw_lock_mgr.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) #define MAX_NUM_MCACHE 8 @@ -258,7 +265,7 @@ void color_space_to_black_color( black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED]; break; - /** + /* * Remove default and add case for all color space * so when we forget to add new color space * compiler will give a warning @@ -755,11 +762,13 @@ void hwss_build_fast_sequence(struct dc *dc, block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } - if (dc->hwss.fams2_global_control_lock_fast) { - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); - block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST; + if (dc->hwss.dmub_hw_control_lock_fast) { + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = true; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = + dc_state_is_fams2_in_use(dc, context) || + dmub_hw_lock_mgr_does_link_require_lock(dc, stream->link); + block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST; (*num_steps)++; } if (dc->hwss.pipe_control_lock) { @@ -784,7 +793,7 @@ void hwss_build_fast_sequence(struct dc *dc, while (current_mpc_pipe) { if (current_mpc_pipe->plane_state) { if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) { - block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_flip_control_gsl_params.hubp = current_mpc_pipe->plane_res.hubp; block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; (*num_steps)++; @@ -894,11 +903,11 @@ void hwss_build_fast_sequence(struct dc *dc, 
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } - if (dc->hwss.fams2_global_control_lock_fast) { - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); - block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST; + if (dc->hwss.dmub_hw_control_lock_fast) { + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = false; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); + block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST; (*num_steps)++; } @@ -911,6 +920,13 @@ void hwss_build_fast_sequence(struct dc *dc, current_mpc_pipe->stream && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.bits.addr_update && !current_mpc_pipe->plane_state->skip_manual_trigger) { + if (dc->hwss.program_cursor_offload_now) { + block_sequence[*num_steps].params.program_cursor_update_now_params.dc = dc; + block_sequence[*num_steps].params.program_cursor_update_now_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = PROGRAM_CURSOR_UPDATE_NOW; + (*num_steps)++; + } + block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; (*num_steps)++; @@ -942,8 +958,9 @@ void hwss_execute_sequence(struct dc *dc, params->pipe_control_lock_params.lock); break; case HUBP_SET_FLIP_CONTROL_GSL: - dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx, - params->set_flip_control_gsl_params.flip_immediate); + params->set_flip_control_gsl_params.hubp->funcs->hubp_set_flip_control_surface_gsl( + params->set_flip_control_gsl_params.hubp, + params->set_flip_control_gsl_params.flip_immediate); break; case HUBP_PROGRAM_TRIPLEBUFFER: dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc, @@ -1001,8 +1018,301 @@ void hwss_execute_sequence(struct dc *dc, params->wait_for_dcc_meta_propagation_params.dc, params->wait_for_dcc_meta_propagation_params.top_pipe_to_program); break; - case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST: - dc->hwss.fams2_global_control_lock_fast(params); + case DMUB_HW_CONTROL_LOCK_FAST: + dc->hwss.dmub_hw_control_lock_fast(params); + break; + case HUBP_PROGRAM_SURFACE_CONFIG: + hwss_program_surface_config(params); + break; + case HUBP_PROGRAM_MCACHE_ID: + hwss_program_mcache_id_and_split_coordinate(params); + break; + case PROGRAM_CURSOR_UPDATE_NOW: + dc->hwss.program_cursor_offload_now( + params->program_cursor_update_now_params.dc, + params->program_cursor_update_now_params.pipe_ctx); + break; + case HUBP_WAIT_PIPE_READ_START: + params->hubp_wait_pipe_read_start_params.hubp->funcs->hubp_wait_pipe_read_start( + params->hubp_wait_pipe_read_start_params.hubp); + break; + case HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM: + dc->hwss.apply_update_flags_for_phantom(params->apply_update_flags_for_phantom_params.pipe_ctx); + break; + case HWS_UPDATE_PHANTOM_VP_POSITION: + dc->hwss.update_phantom_vp_position(params->update_phantom_vp_position_params.dc, + params->update_phantom_vp_position_params.context, + params->update_phantom_vp_position_params.pipe_ctx); + break; + case OPTC_SET_ODM_COMBINE: + 
hwss_set_odm_combine(params); + break; + case OPTC_SET_ODM_BYPASS: + hwss_set_odm_bypass(params); + break; + case OPP_PIPE_CLOCK_CONTROL: + hwss_opp_pipe_clock_control(params); + break; + case OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL: + hwss_opp_program_left_edge_extra_pixel(params); + break; + case DCCG_SET_DTO_DSCCLK: + hwss_dccg_set_dto_dscclk(params); + break; + case DSC_SET_CONFIG: + hwss_dsc_set_config(params); + break; + case DSC_ENABLE: + hwss_dsc_enable(params); + break; + case TG_SET_DSC_CONFIG: + hwss_tg_set_dsc_config(params); + break; + case DSC_DISCONNECT: + hwss_dsc_disconnect(params); + break; + case DSC_READ_STATE: + hwss_dsc_read_state(params); + break; + case DSC_CALCULATE_AND_SET_CONFIG: + hwss_dsc_calculate_and_set_config(params); + break; + case DSC_ENABLE_WITH_OPP: + hwss_dsc_enable_with_opp(params); + break; + case TG_PROGRAM_GLOBAL_SYNC: + hwss_tg_program_global_sync(params); + break; + case TG_WAIT_FOR_STATE: + hwss_tg_wait_for_state(params); + break; + case TG_SET_VTG_PARAMS: + hwss_tg_set_vtg_params(params); + break; + case TG_SETUP_VERTICAL_INTERRUPT2: + hwss_tg_setup_vertical_interrupt2(params); + break; + case DPP_SET_HDR_MULTIPLIER: + hwss_dpp_set_hdr_multiplier(params); + break; + case HUBP_PROGRAM_DET_SIZE: + hwss_program_det_size(params); + break; + case HUBP_PROGRAM_DET_SEGMENTS: + hwss_program_det_segments(params); + break; + case OPP_SET_DYN_EXPANSION: + hwss_opp_set_dyn_expansion(params); + break; + case OPP_PROGRAM_FMT: + hwss_opp_program_fmt(params); + break; + case OPP_PROGRAM_BIT_DEPTH_REDUCTION: + hwss_opp_program_bit_depth_reduction(params); + break; + case OPP_SET_DISP_PATTERN_GENERATOR: + hwss_opp_set_disp_pattern_generator(params); + break; + case ABM_SET_PIPE: + hwss_set_abm_pipe(params); + break; + case ABM_SET_LEVEL: + hwss_set_abm_level(params); + break; + case ABM_SET_IMMEDIATE_DISABLE: + hwss_set_abm_immediate_disable(params); + break; + case MPC_REMOVE_MPCC: + hwss_mpc_remove_mpcc(params); + break; + case OPP_SET_MPCC_DISCONNECT_PENDING: + hwss_opp_set_mpcc_disconnect_pending(params); + break; + case DC_SET_OPTIMIZED_REQUIRED: + hwss_dc_set_optimized_required(params); + break; + case HUBP_DISCONNECT: + hwss_hubp_disconnect(params); + break; + case HUBBUB_FORCE_PSTATE_CHANGE_CONTROL: + hwss_hubbub_force_pstate_change_control(params); + break; + case TG_ENABLE_CRTC: + hwss_tg_enable_crtc(params); + break; + case TG_SET_GSL: + hwss_tg_set_gsl(params); + break; + case TG_SET_GSL_SOURCE_SELECT: + hwss_tg_set_gsl_source_select(params); + break; + case HUBP_WAIT_FLIP_PENDING: + hwss_hubp_wait_flip_pending(params); + break; + case TG_WAIT_DOUBLE_BUFFER_PENDING: + hwss_tg_wait_double_buffer_pending(params); + break; + case UPDATE_FORCE_PSTATE: + hwss_update_force_pstate(params); + break; + case HUBBUB_APPLY_DEDCN21_147_WA: + hwss_hubbub_apply_dedcn21_147_wa(params); + break; + case HUBBUB_ALLOW_SELF_REFRESH_CONTROL: + hwss_hubbub_allow_self_refresh_control(params); + break; + case TG_GET_FRAME_COUNT: + hwss_tg_get_frame_count(params); + break; + case MPC_SET_DWB_MUX: + hwss_mpc_set_dwb_mux(params); + break; + case MPC_DISABLE_DWB_MUX: + hwss_mpc_disable_dwb_mux(params); + break; + case MCIF_WB_CONFIG_BUF: + hwss_mcif_wb_config_buf(params); + break; + case MCIF_WB_CONFIG_ARB: + hwss_mcif_wb_config_arb(params); + break; + case MCIF_WB_ENABLE: + hwss_mcif_wb_enable(params); + break; + case MCIF_WB_DISABLE: + hwss_mcif_wb_disable(params); + break; + case DWBC_ENABLE: + hwss_dwbc_enable(params); + break; + case DWBC_DISABLE: + hwss_dwbc_disable(params); + 
break; + case DWBC_UPDATE: + hwss_dwbc_update(params); + break; + case HUBP_UPDATE_MALL_SEL: + hwss_hubp_update_mall_sel(params); + break; + case HUBP_PREPARE_SUBVP_BUFFERING: + hwss_hubp_prepare_subvp_buffering(params); + break; + case HUBP_SET_BLANK_EN: + hwss_hubp_set_blank_en(params); + break; + case HUBP_DISABLE_CONTROL: + hwss_hubp_disable_control(params); + break; + case HUBBUB_SOFT_RESET: + hwss_hubbub_soft_reset(params); + break; + case HUBP_CLK_CNTL: + hwss_hubp_clk_cntl(params); + break; + case HUBP_INIT: + hwss_hubp_init(params); + break; + case HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS: + hwss_hubp_set_vm_system_aperture_settings(params); + break; + case HUBP_SET_FLIP_INT: + hwss_hubp_set_flip_int(params); + break; + case DPP_DPPCLK_CONTROL: + hwss_dpp_dppclk_control(params); + break; + case DISABLE_PHANTOM_CRTC: + hwss_disable_phantom_crtc(params); + break; + case DSC_PG_STATUS: + hwss_dsc_pg_status(params); + break; + case DSC_WAIT_DISCONNECT_PENDING_CLEAR: + hwss_dsc_wait_disconnect_pending_clear(params); + break; + case DSC_DISABLE: + hwss_dsc_disable(params); + break; + case DCCG_SET_REF_DSCCLK: + hwss_dccg_set_ref_dscclk(params); + break; + case DPP_PG_CONTROL: + hwss_dpp_pg_control(params); + break; + case HUBP_PG_CONTROL: + hwss_hubp_pg_control(params); + break; + case HUBP_RESET: + hwss_hubp_reset(params); + break; + case DPP_RESET: + hwss_dpp_reset(params); + break; + case DPP_ROOT_CLOCK_CONTROL: + hwss_dpp_root_clock_control(params); + break; + case DC_IP_REQUEST_CNTL: + hwss_dc_ip_request_cntl(params); + break; + case DCCG_UPDATE_DPP_DTO: + hwss_dccg_update_dpp_dto(params); + break; + case HUBP_VTG_SEL: + hwss_hubp_vtg_sel(params); + break; + case HUBP_SETUP2: + hwss_hubp_setup2(params); + break; + case HUBP_SETUP: + hwss_hubp_setup(params); + break; + case HUBP_SET_UNBOUNDED_REQUESTING: + hwss_hubp_set_unbounded_requesting(params); + break; + case HUBP_SETUP_INTERDEPENDENT2: + hwss_hubp_setup_interdependent2(params); + break; + case HUBP_SETUP_INTERDEPENDENT: + hwss_hubp_setup_interdependent(params); + break; + case DPP_SET_CURSOR_MATRIX: + hwss_dpp_set_cursor_matrix(params); + break; + case MPC_UPDATE_BLENDING: + hwss_mpc_update_blending(params); + break; + case MPC_ASSERT_IDLE_MPCC: + hwss_mpc_assert_idle_mpcc(params); + break; + case MPC_INSERT_PLANE: + hwss_mpc_insert_plane(params); + break; + case DPP_SET_SCALER: + hwss_dpp_set_scaler(params); + break; + case HUBP_MEM_PROGRAM_VIEWPORT: + hwss_hubp_mem_program_viewport(params); + break; + case ABORT_CURSOR_OFFLOAD_UPDATE: + hwss_abort_cursor_offload_update(params); + break; + case SET_CURSOR_ATTRIBUTE: + hwss_set_cursor_attribute(params); + break; + case SET_CURSOR_POSITION: + hwss_set_cursor_position(params); + break; + case SET_CURSOR_SDR_WHITE_LEVEL: + hwss_set_cursor_sdr_white_level(params); + break; + case PROGRAM_OUTPUT_CSC: + hwss_program_output_csc(params); + break; + case HUBP_SET_BLANK: + hwss_hubp_set_blank(params); + break; + case PHANTOM_HUBP_POST_ENABLE: + hwss_phantom_hubp_post_enable(params); break; default: ASSERT(false); @@ -1011,6 +1321,338 @@ void hwss_execute_sequence(struct dc *dc, } } +/* + * Helper function to add OPTC pipe control lock to block sequence + */ +void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool lock) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.dc = dc; + 
seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.lock = lock; + seq_state->steps[*seq_state->num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP set flip control GSL to block sequence + */ +void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool flip_immediate) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.flip_immediate = flip_immediate; + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP program triplebuffer to block sequence + */ +void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enableTripleBuffer) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.enableTripleBuffer = enableTripleBuffer; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP update plane address to block sequence + */ +void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_PLANE_ADDR; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP set input transfer function to block sequence + */ +void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_plane_state *plane_state) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.plane_state = plane_state; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP program gamut remap to block sequence + */ +void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_gamut_remap_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_GAMUT_REMAP; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP program bias and scale to block sequence + */ +void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx) +{ + if 
(*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_bias_and_scale_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPTC program manual trigger to block sequence + */ +void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_manual_trigger_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP set output transfer function to block sequence + */ +void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_stream_state *stream) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.stream = stream; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC update visual confirm to block sequence + */ +void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.mpcc_id = mpcc_id; + seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_VISUAL_CONFIRM; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC power on MPC mem PWR to block sequence + */ +void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state, + struct mpc *mpc, + int mpcc_id, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = mpcc_id; + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.power_on = power_on; + seq_state->steps[*seq_state->num_steps].func = MPC_POWER_ON_MPC_MEM_PWR; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC set output CSC to block sequence + */ +void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int opp_id, + const uint16_t *regval, + enum mpc_output_csc_mode ocsc_mode) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.opp_id = opp_id; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.regval = regval; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.ocsc_mode = ocsc_mode; + seq_state->steps[*seq_state->num_steps].func = MPC_SET_OUTPUT_CSC; 
+ (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC set OCSC default to block sequence + */ +void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state, + struct mpc *mpc, + int opp_id, + enum dc_color_space colorspace, + enum mpc_output_csc_mode ocsc_mode) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.opp_id = opp_id; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.color_space = colorspace; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.ocsc_mode = ocsc_mode; + seq_state->steps[*seq_state->num_steps].func = MPC_SET_OCSC_DEFAULT; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DMUB send DMCUB command to block sequence + */ +void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state, + struct dc_context *ctx, + union dmub_rb_cmd *cmd, + enum dm_dmub_wait_type wait_type) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.ctx = ctx; + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.cmd = cmd; + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.wait_type = wait_type; + seq_state->steps[*seq_state->num_steps].func = DMUB_SEND_DMCUB_CMD; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DMUB SubVP save surface address to block sequence + */ +void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state, + struct dc_dmub_srv *dc_dmub_srv, + struct dc_plane_address *addr, + uint8_t subvp_index) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc_dmub_srv; + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.addr = addr; + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.subvp_index = subvp_index; + seq_state->steps[*seq_state->num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP wait for DCC meta propagation to block sequence + */ +void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *top_pipe_to_program) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = top_pipe_to_program; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP wait pipe read start to block sequence + */ +void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubp_wait_pipe_read_start_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_PIPE_READ_START; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HWS apply update flags for phantom to block sequence + */ +void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < 
MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.apply_update_flags_for_phantom_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HWS update phantom VP position to block sequence + */ +void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.context = context; + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HWS_UPDATE_PHANTOM_VP_POSITION; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPTC set ODM combine to block sequence + */ +void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state, + struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count, + int odm_slice_width, int last_odm_slice_width) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.tg = tg; + memcpy(seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_inst, opp_inst, sizeof(int) * MAX_PIPES); + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_head_count = opp_head_count; + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.odm_slice_width = odm_slice_width; + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.last_odm_slice_width = last_odm_slice_width; + seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_COMBINE; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPTC set ODM bypass to block sequence + */ +void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state, + struct timing_generator *tg, struct dc_crtc_timing *timing) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.timing = timing; + seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_BYPASS; + (*seq_state->num_steps)++; + } +} + void hwss_send_dmcub_cmd(union block_sequence_params *params) { struct dc_context *ctx = params->send_dmcub_cmd_params.ctx; @@ -1020,6 +1662,276 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params) dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type); } +/* + * Helper function to add TG program global sync to block sequence + */ +void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int vready_offset, + unsigned int vstartup_lines, + unsigned int vupdate_offset_pixels, + unsigned int vupdate_vupdate_width_pixels, + unsigned int pstate_keepout_start_lines) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vready_offset = vready_offset; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vstartup_lines = vstartup_lines; + 
seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_offset_pixels = vupdate_offset_pixels; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_vupdate_width_pixels = vupdate_vupdate_width_pixels; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.pstate_keepout_start_lines = pstate_keepout_start_lines; + seq_state->steps[*seq_state->num_steps].func = TG_PROGRAM_GLOBAL_SYNC; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG wait for state to block sequence + */ +void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state, + struct timing_generator *tg, + enum crtc_state state) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.state = state; + seq_state->steps[*seq_state->num_steps].func = TG_WAIT_FOR_STATE; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG set VTG params to block sequence + */ +void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct dc_crtc_timing *dc_crtc_timing, + bool program_fp2) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.timing = dc_crtc_timing; + seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.program_fp2 = program_fp2; + seq_state->steps[*seq_state->num_steps].func = TG_SET_VTG_PARAMS; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG setup vertical interrupt2 to block sequence + */ +void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state, + struct timing_generator *tg, int start_line) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.start_line = start_line; + seq_state->steps[*seq_state->num_steps].func = TG_SETUP_VERTICAL_INTERRUPT2; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP set HDR multiplier to block sequence + */ +void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state, + struct dpp *dpp, uint32_t hw_mult) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_HDR_MULTIPLIER; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP program DET size to block sequence + */ +void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + unsigned int hubp_inst, + unsigned int det_buffer_size_kb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.det_buffer_size_kb = det_buffer_size_kb; + seq_state->steps[*seq_state->num_steps].func = 
HUBP_PROGRAM_DET_SIZE; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_hubp_pipe_mcache_regs *mcache_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.mcache_regs = mcache_regs; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_MCACHE_ID; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool enable, + bool wait) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.enable = enable; + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.wait = wait; + seq_state->steps[*seq_state->num_steps].func = HUBBUB_FORCE_PSTATE_CHANGE_CONTROL; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP program DET segments to block sequence + */ +void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + unsigned int hubp_inst, + unsigned int det_size) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.det_size = det_size; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SEGMENTS; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPP set dynamic expansion to block sequence + */ +void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_color_space color_space, + enum dc_color_depth color_depth, + enum signal_type signal) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_depth = color_depth; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.signal = signal; + seq_state->steps[*seq_state->num_steps].func = OPP_SET_DYN_EXPANSION; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPP program FMT to block sequence + */ +void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.fmt_bit_depth = fmt_bit_depth; + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.clamping = clamping; + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_FMT; + (*seq_state->num_steps)++; + } +} 
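+ +/* + * Helper function to add OPP program left edge extra pixel to block sequence + */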
+ +void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, + bool is_otg_master) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL; + seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.pixel_encoding = pixel_encoding; + seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.is_otg_master = is_otg_master; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add ABM set pipe to block sequence + */ +void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = ABM_SET_PIPE; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add ABM set level to block sequence + */ +void hwss_add_abm_set_level(struct block_sequence_state *seq_state, + struct abm *abm, + uint32_t abm_level) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm = abm; + seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm_level = abm_level; + seq_state->steps[*seq_state->num_steps].func = ABM_SET_LEVEL; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG enable CRTC to block sequence + */ +void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_enable_crtc_params.tg = tg; + seq_state->steps[*seq_state->num_steps].func = TG_ENABLE_CRTC; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP wait flip pending to block sequence + */ +void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state, + struct hubp *hubp, + unsigned int timeout_us, + unsigned int polling_interval_us) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.timeout_us = timeout_us; + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.polling_interval_us = polling_interval_us; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FLIP_PENDING; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG wait double buffer pending to block sequence + */ +void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int timeout_us, + unsigned int polling_interval_us) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.timeout_us = timeout_us; + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.polling_interval_us = 
polling_interval_us; + seq_state->steps[*seq_state->num_steps].func = TG_WAIT_DOUBLE_BUFFER_PENDING; + (*seq_state->num_steps)++; + } +} + void hwss_program_manual_trigger(union block_sequence_params *params) { struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx; @@ -1046,12 +1958,6 @@ void hwss_setup_dpp(union block_sequence_params *params) plane_state->color_space, NULL); } - - if (dpp && dpp->funcs->set_cursor_matrix) { - dpp->funcs->set_cursor_matrix(dpp, - plane_state->color_space, - plane_state->cursor_csc_color_matrix); - } } void hwss_program_bias_and_scale(union block_sequence_params *params) @@ -1062,9 +1968,8 @@ void hwss_program_bias_and_scale(union block_sequence_params *params) struct dc_bias_and_scale bns_params = plane_state->bias_and_scale; //TODO :for CNVC set scale and bias registers if necessary - if (dpp->funcs->dpp_program_bias_and_scale) { + if (dpp->funcs->dpp_program_bias_and_scale) dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); - } } void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params) @@ -1114,6 +2019,39 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params) dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index); } +void hwss_program_surface_config(union block_sequence_params *params) +{ + struct hubp *hubp = params->program_surface_config_params.hubp; + enum surface_pixel_format format = params->program_surface_config_params.format; + struct dc_tiling_info *tiling_info = params->program_surface_config_params.tiling_info; + struct plane_size size = params->program_surface_config_params.plane_size; + enum dc_rotation_angle rotation = params->program_surface_config_params.rotation; + struct dc_plane_dcc_param *dcc = params->program_surface_config_params.dcc; + bool horizontal_mirror = params->program_surface_config_params.horizontal_mirror; + int compat_level = params->program_surface_config_params.compat_level; + + hubp->funcs->hubp_program_surface_config( + hubp, + format, + tiling_info, + &size, + rotation, + dcc, + horizontal_mirror, + compat_level); + + hubp->power_gated = false; +} + +void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params) +{ + struct hubp *hubp = params->program_mcache_id_and_split_coordinate.hubp; + struct dml2_hubp_pipe_mcache_regs *mcache_regs = params->program_mcache_id_and_split_coordinate.mcache_regs; + + hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, mcache_regs); + +} + void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) @@ -1188,6 +2126,7 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) { int i; + for (i = 0; i < MAX_PIPES; i++) { int count = 0; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1264,3 +2203,1869 @@ void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_cont if (dc->hwss.program_outstanding_updates) dc->hwss.program_outstanding_updates(dc, dc_context); } + +void hwss_set_odm_combine(union block_sequence_params *params) +{ + struct timing_generator *tg = params->set_odm_combine_params.tg; + int *opp_inst = params->set_odm_combine_params.opp_inst; + int opp_head_count = params->set_odm_combine_params.opp_head_count; + int odm_slice_width = params->set_odm_combine_params.odm_slice_width; + int last_odm_slice_width = params->set_odm_combine_params.last_odm_slice_width; + + if (tg && tg->funcs->set_odm_combine) + 
tg->funcs->set_odm_combine(tg, opp_inst, opp_head_count, + odm_slice_width, last_odm_slice_width); +} + +void hwss_set_odm_bypass(union block_sequence_params *params) +{ + struct timing_generator *tg = params->set_odm_bypass_params.tg; + const struct dc_crtc_timing *timing = params->set_odm_bypass_params.timing; + + if (tg && tg->funcs->set_odm_bypass) + tg->funcs->set_odm_bypass(tg, timing); +} + +void hwss_opp_pipe_clock_control(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_pipe_clock_control_params.opp; + bool enable = params->opp_pipe_clock_control_params.enable; + + if (opp && opp->funcs->opp_pipe_clock_control) + opp->funcs->opp_pipe_clock_control(opp, enable); +} + +void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_left_edge_extra_pixel_params.opp; + enum dc_pixel_encoding pixel_encoding = params->opp_program_left_edge_extra_pixel_params.pixel_encoding; + bool is_otg_master = params->opp_program_left_edge_extra_pixel_params.is_otg_master; + + if (opp && opp->funcs->opp_program_left_edge_extra_pixel) + opp->funcs->opp_program_left_edge_extra_pixel(opp, pixel_encoding, is_otg_master); +} + +void hwss_dccg_set_dto_dscclk(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_set_dto_dscclk_params.dccg; + int inst = params->dccg_set_dto_dscclk_params.inst; + int num_slices_h = params->dccg_set_dto_dscclk_params.num_slices_h; + + if (dccg && dccg->funcs->set_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, inst, num_slices_h); +} + +void hwss_dsc_set_config(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_set_config_params.dsc; + struct dsc_config *dsc_cfg = params->dsc_set_config_params.dsc_cfg; + struct dsc_optc_config *dsc_optc_cfg = params->dsc_set_config_params.dsc_optc_cfg; + + if (dsc && dsc->funcs->dsc_set_config) + dsc->funcs->dsc_set_config(dsc, dsc_cfg, dsc_optc_cfg); +} + +void hwss_dsc_enable(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_enable_params.dsc; + int opp_inst = params->dsc_enable_params.opp_inst; + + if (dsc && dsc->funcs->dsc_enable) + dsc->funcs->dsc_enable(dsc, opp_inst); +} + +void hwss_tg_set_dsc_config(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_set_dsc_config_params.tg; + enum optc_dsc_mode optc_dsc_mode = OPTC_DSC_DISABLED; + uint32_t bytes_per_pixel = 0; + uint32_t slice_width = 0; + + if (params->tg_set_dsc_config_params.enable) { + struct dsc_optc_config *dsc_optc_cfg = params->tg_set_dsc_config_params.dsc_optc_cfg; + + if (dsc_optc_cfg) { + bytes_per_pixel = dsc_optc_cfg->bytes_per_pixel; + slice_width = dsc_optc_cfg->slice_width; + optc_dsc_mode = dsc_optc_cfg->is_pixel_format_444 ? 
+ OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; + } + } + + if (tg && tg->funcs->set_dsc_config) + tg->funcs->set_dsc_config(tg, optc_dsc_mode, bytes_per_pixel, slice_width); +} + +void hwss_dsc_disconnect(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_disconnect_params.dsc; + + if (dsc && dsc->funcs->dsc_disconnect) + dsc->funcs->dsc_disconnect(dsc); +} + +void hwss_dsc_read_state(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_read_state_params.dsc; + struct dcn_dsc_state *dsc_state = params->dsc_read_state_params.dsc_state; + + if (dsc && dsc->funcs->dsc_read_state) + dsc->funcs->dsc_read_state(dsc, dsc_state); +} + +void hwss_dsc_calculate_and_set_config(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->dsc_calculate_and_set_config_params.pipe_ctx; + struct pipe_ctx *top_pipe = pipe_ctx; + bool enable = params->dsc_calculate_and_set_config_params.enable; + int opp_cnt = params->dsc_calculate_and_set_config_params.opp_cnt; + + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc_stream_state *stream = pipe_ctx->stream; + + if (!dsc || !enable) + return; + + /* Calculate DSC configuration - extracted from dcn32_update_dsc_on_stream */ + struct dsc_config dsc_cfg; + + while (top_pipe->prev_odm_pipe) + top_pipe = top_pipe->prev_odm_pipe; + + dsc_cfg.pic_width = (stream->timing.h_addressable + top_pipe->dsc_padding_params.dsc_hactive_padding + + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = top_pipe->next_odm_pipe ? 
true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = top_pipe->dsc_padding_params.dsc_hactive_padding; + + /* Set DSC configuration */ + if (dsc->funcs->dsc_set_config) + dsc->funcs->dsc_set_config(dsc, &dsc_cfg, + &params->dsc_calculate_and_set_config_params.dsc_optc_cfg); +} + +void hwss_dsc_enable_with_opp(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->dsc_enable_with_opp_params.pipe_ctx; + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + + if (dsc && dsc->funcs->dsc_enable) + dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); +} + +void hwss_tg_program_global_sync(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_program_global_sync_params.tg; + int vready_offset = params->tg_program_global_sync_params.vready_offset; + unsigned int vstartup_lines = params->tg_program_global_sync_params.vstartup_lines; + unsigned int vupdate_offset_pixels = params->tg_program_global_sync_params.vupdate_offset_pixels; + unsigned int vupdate_vupdate_width_pixels = params->tg_program_global_sync_params.vupdate_vupdate_width_pixels; + unsigned int pstate_keepout_start_lines = params->tg_program_global_sync_params.pstate_keepout_start_lines; + + if (tg->funcs->program_global_sync) { + tg->funcs->program_global_sync(tg, vready_offset, vstartup_lines, + vupdate_offset_pixels, vupdate_vupdate_width_pixels, pstate_keepout_start_lines); + } +} + +void hwss_tg_wait_for_state(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_wait_for_state_params.tg; + enum crtc_state state = params->tg_wait_for_state_params.state; + + if (tg->funcs->wait_for_state) + tg->funcs->wait_for_state(tg, state); +} + +void hwss_tg_set_vtg_params(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_set_vtg_params_params.tg; + struct dc_crtc_timing *timing = params->tg_set_vtg_params_params.timing; + bool program_fp2 = params->tg_set_vtg_params_params.program_fp2; + + if (tg->funcs->set_vtg_params) + tg->funcs->set_vtg_params(tg, timing, program_fp2); +} + +void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_setup_vertical_interrupt2_params.tg; + int start_line = params->tg_setup_vertical_interrupt2_params.start_line; + + if (tg->funcs->setup_vertical_interrupt2) + tg->funcs->setup_vertical_interrupt2(tg, start_line); +} + +void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_set_hdr_multiplier_params.dpp; + uint32_t hw_mult = params->dpp_set_hdr_multiplier_params.hw_mult; + + if (dpp->funcs->dpp_set_hdr_multiplier) + dpp->funcs->dpp_set_hdr_multiplier(dpp, hw_mult); +} + +void hwss_program_det_size(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->program_det_size_params.hubbub; + unsigned int hubp_inst = params->program_det_size_params.hubp_inst; + unsigned int det_buffer_size_kb = params->program_det_size_params.det_buffer_size_kb; + + if (hubbub->funcs->program_det_size) + hubbub->funcs->program_det_size(hubbub, hubp_inst, det_buffer_size_kb); +} + +void hwss_program_det_segments(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->program_det_segments_params.hubbub; + unsigned int hubp_inst = params->program_det_segments_params.hubp_inst; + unsigned int det_size = params->program_det_segments_params.det_size; + + if 
(hubbub->funcs->program_det_segments) + hubbub->funcs->program_det_segments(hubbub, hubp_inst, det_size); +} + +void hwss_opp_set_dyn_expansion(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_set_dyn_expansion_params.opp; + enum dc_color_space color_space = params->opp_set_dyn_expansion_params.color_space; + enum dc_color_depth color_depth = params->opp_set_dyn_expansion_params.color_depth; + enum signal_type signal = params->opp_set_dyn_expansion_params.signal; + + if (opp->funcs->opp_set_dyn_expansion) + opp->funcs->opp_set_dyn_expansion(opp, color_space, color_depth, signal); +} + +void hwss_opp_program_fmt(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_fmt_params.opp; + struct bit_depth_reduction_params *fmt_bit_depth = params->opp_program_fmt_params.fmt_bit_depth; + struct clamping_and_pixel_encoding_params *clamping = params->opp_program_fmt_params.clamping; + + if (opp->funcs->opp_program_fmt) + opp->funcs->opp_program_fmt(opp, fmt_bit_depth, clamping); +} + +void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_bit_depth_reduction_params.opp; + bool use_default_params = params->opp_program_bit_depth_reduction_params.use_default_params; + struct pipe_ctx *pipe_ctx = params->opp_program_bit_depth_reduction_params.pipe_ctx; + struct bit_depth_reduction_params bit_depth_params; + + if (use_default_params) + memset(&bit_depth_params, 0, sizeof(bit_depth_params)); + else + resource_build_bit_depth_reduction_params(pipe_ctx->stream, &bit_depth_params); + + if (opp->funcs->opp_program_bit_depth_reduction) + opp->funcs->opp_program_bit_depth_reduction(opp, &bit_depth_params); +} + +void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_set_disp_pattern_generator_params.opp; + enum controller_dp_test_pattern test_pattern = params->opp_set_disp_pattern_generator_params.test_pattern; + enum controller_dp_color_space color_space = params->opp_set_disp_pattern_generator_params.color_space; + enum dc_color_depth color_depth = params->opp_set_disp_pattern_generator_params.color_depth; + struct tg_color *solid_color = params->opp_set_disp_pattern_generator_params.use_solid_color ? 
+ &params->opp_set_disp_pattern_generator_params.solid_color : NULL; + int width = params->opp_set_disp_pattern_generator_params.width; + int height = params->opp_set_disp_pattern_generator_params.height; + int offset = params->opp_set_disp_pattern_generator_params.offset; + + if (opp && opp->funcs->opp_set_disp_pattern_generator) { + opp->funcs->opp_set_disp_pattern_generator(opp, test_pattern, color_space, + color_depth, solid_color, width, height, offset); + } +} + +void hwss_set_abm_pipe(union block_sequence_params *params) +{ + struct dc *dc = params->set_abm_pipe_params.dc; + struct pipe_ctx *pipe_ctx = params->set_abm_pipe_params.pipe_ctx; + + dc->hwss.set_pipe(pipe_ctx); +} + +void hwss_set_abm_level(union block_sequence_params *params) +{ + struct abm *abm = params->set_abm_level_params.abm; + unsigned int abm_level = params->set_abm_level_params.abm_level; + + if (abm->funcs->set_abm_level) + abm->funcs->set_abm_level(abm, abm_level); +} + +void hwss_set_abm_immediate_disable(union block_sequence_params *params) +{ + struct dc *dc = params->set_abm_immediate_disable_params.dc; + struct pipe_ctx *pipe_ctx = params->set_abm_immediate_disable_params.pipe_ctx; + + if (dc && dc->hwss.set_abm_immediate_disable) + dc->hwss.set_abm_immediate_disable(pipe_ctx); +} + +void hwss_mpc_remove_mpcc(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_remove_mpcc_params.mpc; + struct mpc_tree *mpc_tree_params = params->mpc_remove_mpcc_params.mpc_tree_params; + struct mpcc *mpcc_to_remove = params->mpc_remove_mpcc_params.mpcc_to_remove; + + mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); +} + +void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_set_mpcc_disconnect_pending_params.opp; + int mpcc_inst = params->opp_set_mpcc_disconnect_pending_params.mpcc_inst; + bool pending = params->opp_set_mpcc_disconnect_pending_params.pending; + + opp->mpcc_disconnect_pending[mpcc_inst] = pending; +} + +void hwss_dc_set_optimized_required(union block_sequence_params *params) +{ + struct dc *dc = params->dc_set_optimized_required_params.dc; + bool optimized_required = params->dc_set_optimized_required_params.optimized_required; + + dc->optimized_required = optimized_required; +} + +void hwss_hubp_disconnect(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_disconnect_params.hubp; + + if (hubp->funcs->hubp_disconnect) + hubp->funcs->hubp_disconnect(hubp); +} + +void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_force_pstate_change_control_params.hubbub; + bool enable = params->hubbub_force_pstate_change_control_params.enable; + bool wait = params->hubbub_force_pstate_change_control_params.wait; + + if (hubbub->funcs->force_pstate_change_control) { + hubbub->funcs->force_pstate_change_control(hubbub, enable, wait); + /* Add delay when enabling pstate change control */ + if (enable) + udelay(500); + } +} + +void hwss_tg_enable_crtc(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_enable_crtc_params.tg; + + if (tg->funcs->enable_crtc) + tg->funcs->enable_crtc(tg); +} + +void hwss_tg_set_gsl(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_set_gsl_params.tg; + struct gsl_params *gsl = &params->tg_set_gsl_params.gsl; + + if (tg->funcs->set_gsl) + tg->funcs->set_gsl(tg, gsl); +} + +void hwss_tg_set_gsl_source_select(union block_sequence_params 
*params) +{ + struct timing_generator *tg = params->tg_set_gsl_source_select_params.tg; + int group_idx = params->tg_set_gsl_source_select_params.group_idx; + uint32_t gsl_ready_signal = params->tg_set_gsl_source_select_params.gsl_ready_signal; + + if (tg->funcs->set_gsl_source_select) + tg->funcs->set_gsl_source_select(tg, group_idx, gsl_ready_signal); +} + +void hwss_hubp_wait_flip_pending(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_wait_flip_pending_params.hubp; + unsigned int timeout_us = params->hubp_wait_flip_pending_params.timeout_us; + unsigned int polling_interval_us = params->hubp_wait_flip_pending_params.polling_interval_us; + int j = 0; + + for (j = 0; j < timeout_us / polling_interval_us + && hubp->funcs->hubp_is_flip_pending(hubp); j++) + udelay(polling_interval_us); +} + +void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_wait_double_buffer_pending_params.tg; + unsigned int timeout_us = params->tg_wait_double_buffer_pending_params.timeout_us; + unsigned int polling_interval_us = params->tg_wait_double_buffer_pending_params.polling_interval_us; + int j = 0; + + if (tg->funcs->get_optc_double_buffer_pending) { + for (j = 0; j < timeout_us / polling_interval_us + && tg->funcs->get_optc_double_buffer_pending(tg); j++) + udelay(polling_interval_us); + } +} + +void hwss_update_force_pstate(union block_sequence_params *params) +{ + struct dc *dc = params->update_force_pstate_params.dc; + struct dc_state *context = params->update_force_pstate_params.context; + struct dce_hwseq *hwseq = dc->hwseq; + + if (hwseq->funcs.update_force_pstate) + hwseq->funcs.update_force_pstate(dc, context); +} + +void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_apply_dedcn21_147_wa_params.hubbub; + + hubbub->funcs->apply_DEDCN21_147_wa(hubbub); +} + +void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_allow_self_refresh_control_params.hubbub; + bool allow = params->hubbub_allow_self_refresh_control_params.allow; + + hubbub->funcs->allow_self_refresh_control(hubbub, allow); + + if (!allow && params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied) + *params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = true; +} + +void hwss_tg_get_frame_count(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_get_frame_count_params.tg; + unsigned int *frame_count = params->tg_get_frame_count_params.frame_count; + + *frame_count = tg->funcs->get_frame_count(tg); +} + +void hwss_mpc_set_dwb_mux(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_set_dwb_mux_params.mpc; + int dwb_id = params->mpc_set_dwb_mux_params.dwb_id; + int mpcc_id = params->mpc_set_dwb_mux_params.mpcc_id; + + if (mpc->funcs->set_dwb_mux) + mpc->funcs->set_dwb_mux(mpc, dwb_id, mpcc_id); +} + +void hwss_mpc_disable_dwb_mux(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_disable_dwb_mux_params.mpc; + unsigned int dwb_id = params->mpc_disable_dwb_mux_params.dwb_id; + + if (mpc->funcs->disable_dwb_mux) + mpc->funcs->disable_dwb_mux(mpc, dwb_id); +} + +void hwss_mcif_wb_config_buf(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_config_buf_params.mcif_wb; + struct mcif_buf_params *mcif_buf_params = params->mcif_wb_config_buf_params.mcif_buf_params; + 
unsigned int dest_height = params->mcif_wb_config_buf_params.dest_height; + + if (mcif_wb->funcs->config_mcif_buf) + mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, dest_height); +} + +void hwss_mcif_wb_config_arb(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_config_arb_params.mcif_wb; + struct mcif_arb_params *mcif_arb_params = params->mcif_wb_config_arb_params.mcif_arb_params; + + if (mcif_wb->funcs->config_mcif_arb) + mcif_wb->funcs->config_mcif_arb(mcif_wb, mcif_arb_params); +} + +void hwss_mcif_wb_enable(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_enable_params.mcif_wb; + + if (mcif_wb->funcs->enable_mcif) + mcif_wb->funcs->enable_mcif(mcif_wb); +} + +void hwss_mcif_wb_disable(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_disable_params.mcif_wb; + + if (mcif_wb->funcs->disable_mcif) + mcif_wb->funcs->disable_mcif(mcif_wb); +} + +void hwss_dwbc_enable(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_enable_params.dwb; + struct dc_dwb_params *dwb_params = params->dwbc_enable_params.dwb_params; + + if (dwb->funcs->enable) + dwb->funcs->enable(dwb, dwb_params); +} + +void hwss_dwbc_disable(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_disable_params.dwb; + + if (dwb->funcs->disable) + dwb->funcs->disable(dwb); +} + +void hwss_dwbc_update(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_update_params.dwb; + struct dc_dwb_params *dwb_params = params->dwbc_update_params.dwb_params; + + if (dwb->funcs->update) + dwb->funcs->update(dwb, dwb_params); +} + +void hwss_hubp_update_mall_sel(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_update_mall_sel_params.hubp; + uint32_t mall_sel = params->hubp_update_mall_sel_params.mall_sel; + bool cache_cursor = params->hubp_update_mall_sel_params.cache_cursor; + + if (hubp && hubp->funcs->hubp_update_mall_sel) + hubp->funcs->hubp_update_mall_sel(hubp, mall_sel, cache_cursor); +} + +void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_prepare_subvp_buffering_params.hubp; + bool enable = params->hubp_prepare_subvp_buffering_params.enable; + + if (hubp && hubp->funcs->hubp_prepare_subvp_buffering) + hubp->funcs->hubp_prepare_subvp_buffering(hubp, enable); +} + +void hwss_hubp_set_blank_en(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_blank_en_params.hubp; + bool enable = params->hubp_set_blank_en_params.enable; + + if (hubp && hubp->funcs->set_hubp_blank_en) + hubp->funcs->set_hubp_blank_en(hubp, enable); +} + +void hwss_hubp_disable_control(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_disable_control_params.hubp; + bool disable = params->hubp_disable_control_params.disable; + + if (hubp && hubp->funcs->hubp_disable_control) + hubp->funcs->hubp_disable_control(hubp, disable); +} + +void hwss_hubbub_soft_reset(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_soft_reset_params.hubbub; + bool reset = params->hubbub_soft_reset_params.reset; + + if (hubbub) + params->hubbub_soft_reset_params.hubbub_soft_reset(hubbub, reset); +} + +void hwss_hubp_clk_cntl(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_clk_cntl_params.hubp; + bool enable = params->hubp_clk_cntl_params.enable; + + if (hubp && hubp->funcs->hubp_clk_cntl) { + hubp->funcs->hubp_clk_cntl(hubp, 
enable); + hubp->power_gated = !enable; + } +} + +void hwss_hubp_init(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_init_params.hubp; + + if (hubp && hubp->funcs->hubp_init) + hubp->funcs->hubp_init(hubp); +} + +void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_vm_system_aperture_settings_params.hubp; + struct vm_system_aperture_param apt; + + apt.sys_default = params->hubp_set_vm_system_aperture_settings_params.sys_default; + apt.sys_high = params->hubp_set_vm_system_aperture_settings_params.sys_high; + apt.sys_low = params->hubp_set_vm_system_aperture_settings_params.sys_low; + + if (hubp && hubp->funcs->hubp_set_vm_system_aperture_settings) + hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt); +} + +void hwss_hubp_set_flip_int(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_flip_int_params.hubp; + + if (hubp && hubp->funcs->hubp_set_flip_int) + hubp->funcs->hubp_set_flip_int(hubp); +} + +void hwss_dpp_dppclk_control(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_dppclk_control_params.dpp; + bool dppclk_div = params->dpp_dppclk_control_params.dppclk_div; + bool enable = params->dpp_dppclk_control_params.enable; + + if (dpp && dpp->funcs->dpp_dppclk_control) + dpp->funcs->dpp_dppclk_control(dpp, dppclk_div, enable); +} + +void hwss_disable_phantom_crtc(union block_sequence_params *params) +{ + struct timing_generator *tg = params->disable_phantom_crtc_params.tg; + + if (tg && tg->funcs->disable_phantom_crtc) + tg->funcs->disable_phantom_crtc(tg); +} + +void hwss_dsc_pg_status(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dsc_pg_status_params.hws; + int dsc_inst = params->dsc_pg_status_params.dsc_inst; + + if (hws && hws->funcs.dsc_pg_status) + params->dsc_pg_status_params.is_ungated = hws->funcs.dsc_pg_status(hws, dsc_inst); +} + +void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_wait_disconnect_pending_clear_params.dsc; + + if (!params->dsc_wait_disconnect_pending_clear_params.is_ungated) + return; + if (*params->dsc_wait_disconnect_pending_clear_params.is_ungated == false) + return; + + if (dsc && dsc->funcs->dsc_wait_disconnect_pending_clear) + dsc->funcs->dsc_wait_disconnect_pending_clear(dsc); +} + +void hwss_dsc_disable(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_disable_params.dsc; + + if (!params->dsc_disable_params.is_ungated) + return; + if (*params->dsc_disable_params.is_ungated == false) + return; + + if (dsc && dsc->funcs->dsc_disable) + dsc->funcs->dsc_disable(dsc); +} + +void hwss_dccg_set_ref_dscclk(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_set_ref_dscclk_params.dccg; + int dsc_inst = params->dccg_set_ref_dscclk_params.dsc_inst; + + if (!params->dccg_set_ref_dscclk_params.is_ungated) + return; + if (*params->dccg_set_ref_dscclk_params.is_ungated == false) + return; + + if (dccg && dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, dsc_inst); +} + +void hwss_dpp_pg_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dpp_pg_control_params.hws; + unsigned int dpp_inst = params->dpp_pg_control_params.dpp_inst; + bool power_on = params->dpp_pg_control_params.power_on; + + if (hws->funcs.dpp_pg_control) + hws->funcs.dpp_pg_control(hws, dpp_inst, power_on); +} + +void 
hwss_hubp_pg_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->hubp_pg_control_params.hws; + unsigned int hubp_inst = params->hubp_pg_control_params.hubp_inst; + bool power_on = params->hubp_pg_control_params.power_on; + + if (hws->funcs.hubp_pg_control) + hws->funcs.hubp_pg_control(hws, hubp_inst, power_on); +} + +void hwss_hubp_reset(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_reset_params.hubp; + + if (hubp && hubp->funcs->hubp_reset) + hubp->funcs->hubp_reset(hubp); +} + +void hwss_dpp_reset(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_reset_params.dpp; + + if (dpp && dpp->funcs->dpp_reset) + dpp->funcs->dpp_reset(dpp); +} + +void hwss_dpp_root_clock_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dpp_root_clock_control_params.hws; + unsigned int dpp_inst = params->dpp_root_clock_control_params.dpp_inst; + bool clock_on = params->dpp_root_clock_control_params.clock_on; + + if (hws->funcs.dpp_root_clock_control) + hws->funcs.dpp_root_clock_control(hws, dpp_inst, clock_on); +} + +void hwss_dc_ip_request_cntl(union block_sequence_params *params) +{ + struct dc *dc = params->dc_ip_request_cntl_params.dc; + bool enable = params->dc_ip_request_cntl_params.enable; + struct dce_hwseq *hws = dc->hwseq; + + if (hws->funcs.dc_ip_request_cntl) + hws->funcs.dc_ip_request_cntl(dc, enable); +} + +void hwss_dccg_update_dpp_dto(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_update_dpp_dto_params.dccg; + int dpp_inst = params->dccg_update_dpp_dto_params.dpp_inst; + int dppclk_khz = params->dccg_update_dpp_dto_params.dppclk_khz; + + if (dccg && dccg->funcs->update_dpp_dto) + dccg->funcs->update_dpp_dto(dccg, dpp_inst, dppclk_khz); +} + +void hwss_hubp_vtg_sel(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_vtg_sel_params.hubp; + uint32_t otg_inst = params->hubp_vtg_sel_params.otg_inst; + + if (hubp && hubp->funcs->hubp_vtg_sel) + hubp->funcs->hubp_vtg_sel(hubp, otg_inst); +} + +void hwss_hubp_setup2(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup2_params.hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup2_params.hubp_regs; + union dml2_global_sync_programming *global_sync = params->hubp_setup2_params.global_sync; + struct dc_crtc_timing *timing = params->hubp_setup2_params.timing; + + if (hubp && hubp->funcs->hubp_setup2) + hubp->funcs->hubp_setup2(hubp, hubp_regs, global_sync, timing); +} + +void hwss_hubp_setup(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup_params.hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_params.dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_params.ttu_regs; + struct _vcs_dpi_display_rq_regs_st *rq_regs = params->hubp_setup_params.rq_regs; + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest = params->hubp_setup_params.pipe_dest; + + if (hubp && hubp->funcs->hubp_setup) + hubp->funcs->hubp_setup(hubp, dlg_regs, ttu_regs, rq_regs, pipe_dest); +} + +void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_unbounded_requesting_params.hubp; + bool unbounded_req = params->hubp_set_unbounded_requesting_params.unbounded_req; + + if (hubp && hubp->funcs->set_unbounded_requesting) + hubp->funcs->set_unbounded_requesting(hubp, unbounded_req); +} + +void hwss_hubp_setup_interdependent2(union 
block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup_interdependent2_params.hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup_interdependent2_params.hubp_regs; + + if (hubp && hubp->funcs->hubp_setup_interdependent2) + hubp->funcs->hubp_setup_interdependent2(hubp, hubp_regs); +} + +void hwss_hubp_setup_interdependent(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup_interdependent_params.hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_interdependent_params.dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_interdependent_params.ttu_regs; + + if (hubp && hubp->funcs->hubp_setup_interdependent) + hubp->funcs->hubp_setup_interdependent(hubp, dlg_regs, ttu_regs); +} + +void hwss_dpp_set_cursor_matrix(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_set_cursor_matrix_params.dpp; + enum dc_color_space color_space = params->dpp_set_cursor_matrix_params.color_space; + struct dc_csc_transform *cursor_csc_color_matrix = params->dpp_set_cursor_matrix_params.cursor_csc_color_matrix; + + if (dpp && dpp->funcs->set_cursor_matrix) + dpp->funcs->set_cursor_matrix(dpp, color_space, *cursor_csc_color_matrix); +} + +void hwss_mpc_update_mpcc(union block_sequence_params *params) +{ + struct dc *dc = params->mpc_update_mpcc_params.dc; + struct pipe_ctx *pipe_ctx = params->mpc_update_mpcc_params.pipe_ctx; + struct dce_hwseq *hws = dc->hwseq; + + if (hws->funcs.update_mpcc) + hws->funcs.update_mpcc(dc, pipe_ctx); +} + +void hwss_mpc_update_blending(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_update_blending_params.mpc; + struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_update_blending_params.blnd_cfg; + int mpcc_id = params->mpc_update_blending_params.mpcc_id; + + if (mpc && mpc->funcs->update_blending) + mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id); +} + +void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_assert_idle_mpcc_params.mpc; + int mpcc_id = params->mpc_assert_idle_mpcc_params.mpcc_id; + + if (mpc && mpc->funcs->wait_for_idle) + mpc->funcs->wait_for_idle(mpc, mpcc_id); +} + +void hwss_mpc_insert_plane(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_insert_plane_params.mpc; + struct mpc_tree *tree = params->mpc_insert_plane_params.mpc_tree_params; + struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_insert_plane_params.blnd_cfg; + struct mpcc_sm_cfg *sm_cfg = params->mpc_insert_plane_params.sm_cfg; + struct mpcc *insert_above_mpcc = params->mpc_insert_plane_params.insert_above_mpcc; + int mpcc_id = params->mpc_insert_plane_params.mpcc_id; + int dpp_id = params->mpc_insert_plane_params.dpp_id; + + if (mpc && mpc->funcs->insert_plane) + mpc->funcs->insert_plane(mpc, tree, blnd_cfg, sm_cfg, insert_above_mpcc, + dpp_id, mpcc_id); +} + +void hwss_dpp_set_scaler(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_set_scaler_params.dpp; + const struct scaler_data *scl_data = params->dpp_set_scaler_params.scl_data; + + if (dpp && dpp->funcs->dpp_set_scaler) + dpp->funcs->dpp_set_scaler(dpp, scl_data); +} + +void hwss_hubp_mem_program_viewport(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_mem_program_viewport_params.hubp; + const struct rect *viewport = params->hubp_mem_program_viewport_params.viewport; + const struct rect *viewport_c = params->hubp_mem_program_viewport_params.viewport_c; + + if (hubp && 
hubp->funcs->mem_program_viewport) + hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c); +} + +void hwss_abort_cursor_offload_update(union block_sequence_params *params) +{ + struct dc *dc = params->abort_cursor_offload_update_params.dc; + struct pipe_ctx *pipe_ctx = params->abort_cursor_offload_update_params.pipe_ctx; + + if (dc && dc->hwss.abort_cursor_offload_update) + dc->hwss.abort_cursor_offload_update(dc, pipe_ctx); +} + +void hwss_set_cursor_attribute(union block_sequence_params *params) +{ + struct dc *dc = params->set_cursor_attribute_params.dc; + struct pipe_ctx *pipe_ctx = params->set_cursor_attribute_params.pipe_ctx; + + if (dc && dc->hwss.set_cursor_attribute) + dc->hwss.set_cursor_attribute(pipe_ctx); +} + +void hwss_set_cursor_position(union block_sequence_params *params) +{ + struct dc *dc = params->set_cursor_position_params.dc; + struct pipe_ctx *pipe_ctx = params->set_cursor_position_params.pipe_ctx; + + if (dc && dc->hwss.set_cursor_position) + dc->hwss.set_cursor_position(pipe_ctx); +} + +void hwss_set_cursor_sdr_white_level(union block_sequence_params *params) +{ + struct dc *dc = params->set_cursor_sdr_white_level_params.dc; + struct pipe_ctx *pipe_ctx = params->set_cursor_sdr_white_level_params.pipe_ctx; + + if (dc && dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); +} + +void hwss_program_output_csc(union block_sequence_params *params) +{ + struct dc *dc = params->program_output_csc_params.dc; + struct pipe_ctx *pipe_ctx = params->program_output_csc_params.pipe_ctx; + enum dc_color_space colorspace = params->program_output_csc_params.colorspace; + uint16_t *matrix = params->program_output_csc_params.matrix; + int opp_id = params->program_output_csc_params.opp_id; + + if (dc && dc->hwss.program_output_csc) + dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id); +} + +void hwss_hubp_set_blank(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_blank_params.hubp; + bool blank = params->hubp_set_blank_params.blank; + + if (hubp && hubp->funcs->set_blank) + hubp->funcs->set_blank(hubp, blank); +} + +void hwss_phantom_hubp_post_enable(union block_sequence_params *params) +{ + struct hubp *hubp = params->phantom_hubp_post_enable_params.hubp; + + if (hubp && hubp->funcs->phantom_hubp_post_enable) + hubp->funcs->phantom_hubp_post_enable(hubp); +} + +void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, int inst, int num_slices_h) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_SET_DTO_DSCCLK; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.inst = inst; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.num_slices_h = num_slices_h; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_CALCULATE_AND_SET_CONFIG; + seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.enable = enable; + 
seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.opp_cnt = opp_cnt; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_REMOVE_MPCC; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc_tree_params = mpc_tree_params; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpcc_to_remove = mpcc_to_remove; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, int mpcc_inst, bool pending) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_SET_MPCC_DISCONNECT_PENDING; + seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.mpcc_inst = mpcc_inst; + seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.pending = pending; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_DISCONNECT; + seq_state->steps[*seq_state->num_steps].params.hubp_disconnect_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_ENABLE_WITH_OPP; + seq_state->steps[*seq_state->num_steps].params.dsc_enable_with_opp_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state, + struct timing_generator *tg, struct dsc_optc_config *dsc_optc_cfg, bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_DSC_CONFIG; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.dsc_optc_cfg = dsc_optc_cfg; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_DISCONNECT; + seq_state->steps[*seq_state->num_steps].params.dsc_disconnect_params.dsc = dsc; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state, + struct dc *dc, bool optimized_required) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DC_SET_OPTIMIZED_REQUIRED; + seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.optimized_required = 
optimized_required; + (*seq_state->num_steps)++; + } +} + +void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = ABM_SET_IMMEDIATE_DISABLE; + seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, + enum dc_color_depth color_depth, + struct tg_color solid_color, + bool use_solid_color, + int width, + int height, + int offset) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_SET_DISP_PATTERN_GENERATOR; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.test_pattern = test_pattern; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_depth = color_depth; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.solid_color = solid_color; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.use_solid_color = use_solid_color; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.width = width; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.height = height; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.offset = offset; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC update blending to block sequence + */ +void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpcc_blnd_cfg blnd_cfg, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_BLENDING; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.blnd_cfg = blnd_cfg; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC insert plane to block sequence + */ +void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpc_tree *mpc_tree_params, + struct mpcc_blnd_cfg blnd_cfg, + struct mpcc_sm_cfg *sm_cfg, + struct mpcc *insert_above_mpcc, + int dpp_id, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_INSERT_PLANE; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc_tree_params = mpc_tree_params; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.blnd_cfg = blnd_cfg; + 
seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.sm_cfg = sm_cfg; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.insert_above_mpcc = insert_above_mpcc; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.dpp_id = dpp_id; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC assert idle MPCC to block sequence + */ +void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_ASSERT_IDLE_MPCC; + seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP set blank to block sequence + */ +void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool blank) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.blank = blank; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool use_default_params, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_BIT_DEPTH_REDUCTION; + seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.use_default_params = use_default_params; + seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state, + struct dc *dc, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DC_IP_REQUEST_CNTL; + seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dwbc_update(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_UPDATE; + seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb = dwb; + seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb_params = dwb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_buf_params *mcif_buf_params, + unsigned int dest_height) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_BUF; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_wb = mcif_wb; + 
seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_buf_params = mcif_buf_params; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.dest_height = dest_height; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_arb_params *mcif_arb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_ARB; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_wb = mcif_wb; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_arb_params = mcif_arb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_ENABLE; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_enable_params.mcif_wb = mcif_wb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_DISABLE; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_disable_params.mcif_wb = mcif_wb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + int dwb_id, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_SET_DWB_MUX; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.dwb_id = dwb_id; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + unsigned int dwb_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_DISABLE_DWB_MUX; + seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.dwb_id = dwb_id; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dwbc_enable(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_ENABLE; + seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb = dwb; + seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb_params = dwb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dwbc_disable(struct block_sequence_state *seq_state, + struct dwbc *dwb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_DISABLE; + seq_state->steps[*seq_state->num_steps].params.dwbc_disable_params.dwb = dwb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct gsl_params gsl) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL; + 
seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.gsl = gsl; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int group_idx, + uint32_t gsl_ready_signal) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL_SOURCE_SELECT; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.group_idx = group_idx; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.gsl_ready_signal = gsl_ready_signal; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t mall_sel, + bool cache_cursor) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_MALL_SEL; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.mall_sel = mall_sel; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.cache_cursor = cache_cursor; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PREPARE_SUBVP_BUFFERING; + seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK_EN; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool disable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_DISABLE_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.disable = disable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset), + bool reset) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBBUB_SOFT_RESET; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub_soft_reset = hubbub_soft_reset; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.reset = reset; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_clk_cntl(struct 
block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_CLK_CNTL; + seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state, + struct dpp *dpp, + bool dppclk_div, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_DPPCLK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dppclk_div = dppclk_div; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DISABLE_PHANTOM_CRTC; + seq_state->steps[*seq_state->num_steps].params.disable_phantom_crtc_params.tg = tg; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + int dsc_inst, + bool is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_PG_STATUS; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.dsc_inst = dsc_inst; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_WAIT_DISCONNECT_PENDING_CLEAR; + seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.dsc = dsc; + seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_disable(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_DISABLE; + seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.dsc = dsc; + seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dsc_inst, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_SET_REF_DSCCLK; + seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dsc_inst = dsc_inst; + seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void 
hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_ROOT_CLOCK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.clock_on = clock_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_PG_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.power_on = power_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PG_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.power_on = power_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_init(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_INIT; + seq_state->steps[*seq_state->num_steps].params.hubp_init_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_reset(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_RESET; + seq_state->steps[*seq_state->num_steps].params.hubp_reset_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_reset(struct block_sequence_state *seq_state, + struct dpp *dpp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_RESET; + seq_state->steps[*seq_state->num_steps].params.dpp_reset_params.dpp = dpp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PIPE_CLOCK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint64_t sys_default, + uint64_t sys_low, + uint64_t sys_high) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = 
HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_default.quad_part = sys_default; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_low.quad_part = sys_low; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_high.quad_part = sys_high; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_INT; + seq_state->steps[*seq_state->num_steps].params.hubp_set_flip_int_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dpp_inst, + int dppclk_khz) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_UPDATE_DPP_DTO; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dppclk_khz = dppclk_khz; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t otg_inst) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_VTG_SEL; + seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.otg_inst = otg_inst; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs, + union dml2_global_sync_programming *global_sync, + struct dc_crtc_timing *timing) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP2; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp_regs = hubp_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.global_sync = global_sync; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.timing = timing; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs, + struct _vcs_dpi_display_rq_regs_st *rq_regs, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.dlg_regs = dlg_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.ttu_regs = ttu_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.rq_regs = rq_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.pipe_dest = pipe_dest; + 
(*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool unbounded_req) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_UNBOUNDED_REQUESTING; + seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.unbounded_req = unbounded_req; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT2; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp_regs = hubp_regs; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.dlg_regs = dlg_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.ttu_regs = ttu_regs; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state, + struct hubp *hubp, + enum surface_pixel_format format, + struct dc_tiling_info *tiling_info, + struct plane_size plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + int compat_level) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_SURFACE_CONFIG; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.format = format; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.tiling_info = tiling_info; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.plane_size = plane_size; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.rotation = rotation; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.dcc = dcc; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.horizontal_mirror = horizontal_mirror; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.compat_level = compat_level; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SETUP_DPP; + seq_state->steps[*seq_state->num_steps].params.setup_dpp_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state, + struct dpp *dpp, + 
enum dc_color_space color_space, + struct dc_csc_transform *cursor_csc_color_matrix) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SET_CURSOR_MATRIX; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.cursor_csc_color_matrix = cursor_csc_color_matrix; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state, + struct dpp *dpp, + const struct scaler_data *scl_data) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SET_SCALER; + seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.scl_data = scl_data; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state, + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_MEM_PROGRAM_VIEWPORT; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport = viewport; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport_c = viewport_c; + (*seq_state->num_steps)++; + } +} + +void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = ABORT_CURSOR_OFFLOAD_UPDATE; + seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_ATTRIBUTE; + seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_POSITION; + seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_SDR_WHITE_LEVEL; + seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.dc = dc; + 
seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_program_output_csc(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = PROGRAM_OUTPUT_CSC; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.colorspace = colorspace; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.matrix = matrix; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.opp_id = opp_id; + (*seq_state->num_steps)++; + } +} + +void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = PHANTOM_HUBP_POST_ENABLE; + seq_state->steps[*seq_state->num_steps].params.phantom_hubp_post_enable_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_update_force_pstate(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = UPDATE_FORCE_PSTATE; + seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.context = context; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state, + struct hubbub *hubbub) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBBUB_APPLY_DEDCN21_147_WA; + seq_state->steps[*seq_state->num_steps].params.hubbub_apply_dedcn21_147_wa_params.hubbub = hubbub; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool allow, + bool *disallow_self_refresh_applied) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBBUB_ALLOW_SELF_REFRESH_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.allow = allow; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = disallow_self_refresh_applied; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int *frame_count) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_GET_FRAME_COUNT; + seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.frame_count = frame_count; + (*seq_state->num_steps)++; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 
a180f68f711c..deb23d20bca6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -522,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc) { struct link_encoder *link_enc = NULL; - enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS]; + enum engine_id encs_assigned[MAX_LINK_ENCODERS]; int i; - for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++) + for (i = 0; i < MAX_LINK_ENCODERS; i++) encs_assigned[i] = ENGINE_ID_UNKNOWN; /* Add assigned encoders to list. */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index bc5dedf5f60c..848c267ef11e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -95,10 +95,44 @@ #define DC_LOGGER \ dc->ctx->logger #define DC_LOGGER_INIT(logger) -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define UNABLE_TO_SPLIT -1 +static void capture_pipe_topology_data(struct dc *dc, int plane_idx, int slice_idx, int stream_idx, + int dpp_inst, int opp_inst, int tg_inst, bool is_phantom_pipe) +{ + struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index]; + + if (current_snapshot->line_count >= MAX_PIPES) + return; + + current_snapshot->pipe_log_lines[current_snapshot->line_count].is_phantom_pipe = is_phantom_pipe; + current_snapshot->pipe_log_lines[current_snapshot->line_count].plane_idx = plane_idx; + current_snapshot->pipe_log_lines[current_snapshot->line_count].slice_idx = slice_idx; + current_snapshot->pipe_log_lines[current_snapshot->line_count].stream_idx = stream_idx; + current_snapshot->pipe_log_lines[current_snapshot->line_count].dpp_inst = dpp_inst; + current_snapshot->pipe_log_lines[current_snapshot->line_count].opp_inst = opp_inst; + current_snapshot->pipe_log_lines[current_snapshot->line_count].tg_inst = tg_inst; + + current_snapshot->line_count++; +} + +static void start_new_topology_snapshot(struct dc *dc, struct dc_state *state) +{ + // Move to next snapshot slot (circular buffer) + dc->debug_data.topology_history.current_snapshot_index = (dc->debug_data.topology_history.current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS; + + // Clear the new snapshot + struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index]; + memset(current_snapshot, 0, sizeof(*current_snapshot)); + + // Set metadata + current_snapshot->timestamp_us = dm_get_timestamp(dc->ctx); + current_snapshot->stream_count = state->stream_count; + current_snapshot->phantom_stream_count = state->phantom_stream_count; +} + enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) { enum dce_version dc_version = DCE_VERSION_UNKNOWN; @@ -446,6 +480,14 @@ bool resource_construct( DC_ERR("DC: failed to create stream_encoder!\n"); pool->stream_enc_count++; } + + for (i = 0; i < caps->num_analog_stream_encoder; i++) { + pool->stream_enc[caps->num_stream_encoder + i] = + create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx); + if (pool->stream_enc[caps->num_stream_encoder + i] == NULL) + DC_ERR("DC: failed to create analog stream_encoder %d!\n", i); + pool->stream_enc_count++; + } } pool->hpo_dp_stream_enc_count = 0; @@ -2303,10 +2345,11 @@ bool resource_is_odm_topology_changed(const struct pipe_ctx 
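/*
 * Editor's note: illustrative sketch, not part of the patch above.  The new
 * topology snapshot helpers implement a small circular history: advancing the
 * index modulo MAX_TOPOLOGY_SNAPSHOTS silently reuses the oldest slot, and
 * each snapshot accumulates at most MAX_PIPES log lines.  A minimal
 * standalone model of the same bookkeeping (sizes and field names invented
 * here):
 */
#include <string.h>

#define HIST_DEPTH 4
#define MAX_LINES  6

struct snapshot {
	unsigned long timestamp_us;
	int line_count;
	int plane_idx[MAX_LINES];
};

struct history {
	struct snapshot slots[HIST_DEPTH];
	int current;
};

static struct snapshot *history_start_new(struct history *h, unsigned long now_us)
{
	h->current = (h->current + 1) % HIST_DEPTH;	/* wrap onto the oldest slot */
	memset(&h->slots[h->current], 0, sizeof(h->slots[h->current]));
	h->slots[h->current].timestamp_us = now_us;
	return &h->slots[h->current];
}

static void history_capture(struct snapshot *s, int plane_idx)
{
	if (s->line_count >= MAX_LINES)
		return;					/* drop extra lines, never overflow */
	s->plane_idx[s->line_count++] = plane_idx;
}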
*otg_master_a, static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, int stream_idx, int slice_idx, int plane_idx, int slice_count, - bool is_primary) + bool is_primary, bool is_phantom_pipe) { DC_LOGGER_INIT(dc->ctx->logger); + // new format for logging: bit storing code if (slice_idx == 0 && plane_idx == 0 && is_primary) { /* case 0 (OTG master pipe with plane) */ DC_LOG_DC(" | plane%d slice%d stream%d|", @@ -2315,6 +2358,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, pipe->plane_res.dpp->inst, pipe->stream_res.opp->inst, pipe->stream_res.tg->inst); + capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else if (slice_idx == 0 && plane_idx == -1) { /* case 1 (OTG master pipe without plane) */ DC_LOG_DC(" | slice%d stream%d|", @@ -2323,6 +2370,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, pipe->stream_res.opp->inst, pipe->stream_res.opp->inst, pipe->stream_res.tg->inst); + capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else if (slice_idx != 0 && plane_idx == 0 && is_primary) { /* case 2 (OPP head pipe with plane) */ DC_LOG_DC(" | plane%d slice%d | |", @@ -2330,27 +2381,43 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, DC_LOG_DC(" |DPP%d----OPP%d----| |", pipe->plane_res.dpp->inst, pipe->stream_res.opp->inst); + capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else if (slice_idx != 0 && plane_idx == -1) { /* case 3 (OPP head pipe without plane) */ DC_LOG_DC(" | slice%d | |", slice_idx); DC_LOG_DC(" |DPG%d----OPP%d----| |", pipe->plane_res.dpp->inst, pipe->stream_res.opp->inst); + capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else if (slice_idx == slice_count - 1) { /* case 4 (DPP pipe in last slice) */ DC_LOG_DC(" | plane%d | |", plane_idx); DC_LOG_DC(" |DPP%d----| |", pipe->plane_res.dpp->inst); + capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else { /* case 5 (DPP pipe not in last slice) */ DC_LOG_DC(" | plane%d | | |", plane_idx); DC_LOG_DC(" |DPP%d----| | |", pipe->plane_res.dpp->inst); + capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } } static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state, - struct pipe_ctx *otg_master, int stream_idx) + struct pipe_ctx *otg_master, int stream_idx, bool is_phantom_pipe) { struct pipe_ctx *opp_heads[MAX_PIPES]; struct pipe_ctx *dpp_pipes[MAX_PIPES]; @@ -2376,12 +2443,12 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state, resource_log_pipe(dc, dpp_pipes[dpp_idx], stream_idx, slice_idx, plane_idx, slice_count, - is_primary); + is_primary, is_phantom_pipe); } } else { resource_log_pipe(dc, opp_heads[slice_idx], stream_idx, slice_idx, plane_idx, - slice_count, true); + slice_count, true, is_phantom_pipe); } } @@ -2412,6 +2479,10 @@ void resource_log_pipe_topology_update(struct dc *dc, 
struct dc_state *state) struct pipe_ctx *otg_master; int stream_idx, phantom_stream_idx; DC_LOGGER_INIT(dc->ctx->logger); + bool is_phantom_pipe = false; + + // Start a new snapshot for this topology update + start_new_topology_snapshot(dc, state); DC_LOG_DC(" pipe topology update"); DC_LOG_DC(" ________________________"); @@ -2425,9 +2496,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) if (!otg_master) continue; - resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe); } if (state->phantom_stream_count > 0) { + is_phantom_pipe = true; DC_LOG_DC(" | (phantom pipes) |"); for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) { if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN) @@ -2440,7 +2512,7 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) if (!otg_master) continue; - resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe); } } DC_LOG_DC(" |________________________|\n"); @@ -2690,17 +2762,40 @@ static inline int find_fixed_dio_link_enc(const struct dc_link *link) } static inline int find_free_dio_link_enc(const struct resource_context *res_ctx, - const struct dc_link *link, const struct resource_pool *pool) + const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream) { - int i; + int i, j = -1; + int stream_enc_inst = -1; int enc_count = pool->dig_link_enc_count; - /* for dpia, check preferred encoder first and then the next one */ - for (i = 0; i < enc_count; i++) - if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0) - break; + /* Find stream encoder instance for the stream */ + if (stream) { + for (i = 0; i < pool->pipe_count; i++) { + if ((res_ctx->pipe_ctx[i].stream == stream) && + (res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) { + stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id; + break; + } + } + } + + /* Assign dpia preferred > stream enc instance > available */ + for (i = 0; i < enc_count; i++) { + if (res_ctx->dio_link_enc_ref_cnts[i] == 0) { + if (j == -1) + j = i; + + if (link->dpia_preferred_eng_id == i) { + j = i; + break; + } - return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1; + if (stream_enc_inst == i) { + j = stream_enc_inst; + } + } + } + return j; } static inline void acquire_dio_link_enc( @@ -2781,7 +2876,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc, retain_dio_link_enc(res_ctx, enc_index); } else { if (stream->link->is_dig_mapping_flexible) - enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool); + enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream); else { int link_index = 0; @@ -2791,7 +2886,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc, * one into the acquiring link. 
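/*
 * Editor's note: illustrative sketch, not part of the patch above.  The
 * rewritten find_free_dio_link_enc() picks among unused DIO link encoders
 * with the priority: DPIA-preferred engine, then the encoder matching the
 * stream's stream-encoder instance, then the first free encoder.  The same
 * walk, reduced to a standalone function (names invented for the example):
 */
static int pick_link_enc(const int *ref_cnts, int enc_count,
			 int dpia_preferred, int stream_enc_inst)
{
	int i, pick = -1;

	for (i = 0; i < enc_count; i++) {
		if (ref_cnts[i] != 0)
			continue;		/* encoder already acquired */

		if (pick == -1)
			pick = i;		/* first free: lowest-priority fallback */

		if (i == dpia_preferred)
			return i;		/* highest priority: take it immediately */

		if (i == stream_enc_inst)
			pick = i;		/* better than the fallback; keep scanning */
	}

	return pick;				/* -1 when nothing is free */
}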
*/ if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) { - int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool); + int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream); if (new_enc_index >= 0) swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index); @@ -5201,7 +5296,7 @@ struct link_encoder *get_temp_dio_link_enc( enc_index = link->eng_id; if (enc_index < 0) - enc_index = find_free_dio_link_enc(res_ctx, link, pool); + enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL); if (enc_index >= 0) link_enc = pool->link_encoders[enc_index]; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index c61300a7cb1c..2de8ef4a58ec 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -35,8 +35,8 @@ #include "link_enc_cfg.h" #if defined(CONFIG_DRM_AMD_DC_FP) -#include "dml2/dml2_wrapper.h" -#include "dml2/dml2_internal_types.h" +#include "dml2_0/dml2_wrapper.h" +#include "dml2_0/dml2_internal_types.h" #endif #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 9ac2d41f8fca..129cd5f84983 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -224,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status( return dc_state_get_stream_status(dc->current_state, stream); } +const struct dc_stream_status *dc_stream_get_status_const( + const struct dc_stream_state *stream) +{ + struct dc *dc = stream->ctx->dc; + + return dc_state_get_stream_status(dc->current_state, stream); +} + void program_cursor_attributes( struct dc *dc, struct dc_stream_state *stream) @@ -231,6 +239,7 @@ void program_cursor_attributes( int i; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; + bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc); if (!stream) return; @@ -245,9 +254,14 @@ void program_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - dc->hwss.cursor_lock(dc, pipe_to_program, true); - if (pipe_to_program->next_odm_pipe) - dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); + + if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) { + dc->hwss.begin_cursor_offload_update(dc, pipe_ctx); + } else { + dc->hwss.cursor_lock(dc, pipe_to_program, true); + if (pipe_to_program->next_odm_pipe) + dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); + } } dc->hwss.set_cursor_attribute(pipe_ctx); @@ -255,12 +269,18 @@ void program_cursor_attributes( dc_send_update_cursor_info_to_dmu(pipe_ctx, i); if (dc->hwss.set_cursor_sdr_white_level) dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe) + dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx); } if (pipe_to_program) { - dc->hwss.cursor_lock(dc, pipe_to_program, false); - if (pipe_to_program->next_odm_pipe) - dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); + if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) { + dc->hwss.commit_cursor_offload_update(dc, pipe_to_program); + } else { + dc->hwss.cursor_lock(dc, pipe_to_program, false); + if (pipe_to_program->next_odm_pipe) + dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); + } } } @@ -366,6 +386,7 @@ void program_cursor_position( int i; struct 
resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; + bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc); if (!stream) return; @@ -384,16 +405,27 @@ void program_cursor_position( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - dc->hwss.cursor_lock(dc, pipe_to_program, true); + + if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) + dc->hwss.begin_cursor_offload_update(dc, pipe_ctx); + else + dc->hwss.cursor_lock(dc, pipe_to_program, true); } dc->hwss.set_cursor_position(pipe_ctx); + if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe) + dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx); + if (dc->ctx->dmub_srv) dc_send_update_cursor_info_to_dmu(pipe_ctx, i); } - if (pipe_to_program) - dc->hwss.cursor_lock(dc, pipe_to_program, false); + if (pipe_to_program) { + if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) + dc->hwss.commit_cursor_offload_update(dc, pipe_to_program); + else + dc->hwss.cursor_lock(dc, pipe_to_program, false); + } } bool dc_stream_set_cursor_position( @@ -705,9 +737,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, { uint8_t i; bool ret = false; - struct dc *dc = stream->ctx->dc; - struct resource_context *res_ctx = - &dc->current_state->res_ctx; + struct dc *dc; + struct resource_context *res_ctx; + + if (!stream->ctx) + return false; + + dc = stream->ctx->dc; + res_ctx = &dc->current_state->res_ctx; dc_exit_ips_for_hw_access(dc); @@ -855,9 +892,11 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) stream->sink->sink_signal != SIGNAL_TYPE_NONE) { DC_LOG_DC( - "\tdispname: %s signal: %x\n", + "\tsignal: %x dispname: %s manufacturer_id: 0x%x product_id: 0x%x\n", + stream->signal, stream->sink->edid_caps.display_name, - stream->signal); + stream->sink->edid_caps.manufacturer_id, + stream->sink->edid_caps.product_id); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 98f0b6b3c213..29edfa51ea2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -42,7 +42,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dmub/inc/dmub_cmd.h" @@ -54,8 +54,16 @@ struct abm_save_restore; struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; +struct dcn_hubbub_reg_state; +struct dcn_hubp_reg_state; +struct dcn_dpp_reg_state; +struct dcn_mpc_reg_state; +struct dcn_opp_reg_state; +struct dcn_dsc_reg_state; +struct dcn_optc_reg_state; +struct dcn_dccg_reg_state; -#define DC_VER "3.2.351" +#define DC_VER "3.2.359" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC @@ -278,6 +286,15 @@ struct dc_scl_caps { bool sharpener_support; }; +struct dc_check_config { + /** + * max video plane width that can be safely assumed to be always + * supported by single DPP pipe. + */ + unsigned int max_optimizable_video_width; + bool enable_legacy_fast_update; +}; + struct dc_caps { uint32_t max_streams; uint32_t max_links; @@ -293,11 +310,6 @@ struct dc_caps { unsigned int max_cursor_size; unsigned int max_buffered_cursor_size; unsigned int max_video_width; - /* - * max video plane width that can be safely assumed to be always - * supported by single DPP pipe. 
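/*
 * Editor's note: illustrative sketch, not part of the patch above.  With
 * cursor offload enabled and the offload hooks present, the cursor paths in
 * program_cursor_attributes()/program_cursor_position() bracket per-pipe
 * programming with begin/commit offload calls instead of the classic
 * cursor_lock(true)/cursor_lock(false) pair, and fall back to locking when
 * the hooks are absent.  The control flow, reduced to its skeleton (the
 * callback names below are placeholders, not the real hwss interface):
 */
struct cursor_ops {
	void (*begin_offload)(void *pipe);
	void (*commit_offload)(void *pipe);
	void (*lock)(void *pipe, int lock);
	void (*program)(void *pipe);
};

static void program_cursor(const struct cursor_ops *ops, void *pipe,
			   int offload_enabled)
{
	int use_offload = offload_enabled &&
			  ops->begin_offload && ops->commit_offload;

	if (use_offload)
		ops->begin_offload(pipe);	/* start queuing the update */
	else
		ops->lock(pipe, 1);		/* legacy path: lock the pipe */

	ops->program(pipe);			/* per-pipe cursor programming */

	if (use_offload)
		ops->commit_offload(pipe);	/* hand the queued update off */
	else
		ops->lock(pipe, 0);
}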
- */ - unsigned int max_optimizable_video_width; unsigned int min_horizontal_blanking_period; int linear_pitch_alignment; bool dcc_const_color; @@ -455,6 +467,18 @@ enum surface_update_type { UPDATE_TYPE_FULL, /* may need to shuffle resources */ }; +enum dc_lock_descriptor { + LOCK_DESCRIPTOR_NONE = 0x0, + LOCK_DESCRIPTOR_STREAM = 0x1, + LOCK_DESCRIPTOR_LINK = 0x2, + LOCK_DESCRIPTOR_GLOBAL = 0x4, +}; + +struct surface_update_descriptor { + enum surface_update_type update_type; + enum dc_lock_descriptor lock_descriptor; +}; + /* Forward declaration*/ struct dc; struct dc_plane_state; @@ -530,6 +554,7 @@ struct dc_config { bool set_pipe_unlock_order; bool enable_dpia_pre_training; bool unify_link_enc_assignment; + bool enable_cursor_offload; struct spl_sharpness_range dcn_sharpness_range; struct spl_sharpness_range dcn_override_sharpness_range; }; @@ -849,8 +874,7 @@ union dpia_debug_options { uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ uint32_t disable_usb4_pm_support:1; /* bit 5 */ uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */ - uint32_t enable_bw_allocation_mode:1; /* bit 7 */ - uint32_t reserved:24; + uint32_t reserved:25; } bits; uint32_t raw; }; @@ -875,6 +899,7 @@ struct dc_debug_data { uint32_t ltFailCount; uint32_t i2cErrorCount; uint32_t auxErrorCount; + struct pipe_topology_history topology_history; }; struct dc_phy_addr_space_config { @@ -1120,7 +1145,6 @@ struct dc_debug_options { uint32_t fpo_vactive_min_active_margin_us; uint32_t fpo_vactive_max_blank_us; bool enable_hpo_pg_support; - bool enable_legacy_fast_update; bool disable_dc_mode_overwrite; bool replay_skip_crtc_disabled; bool ignore_pg;/*do nothing, let pmfw control it*/ @@ -1152,7 +1176,6 @@ struct dc_debug_options { bool enable_ips_visual_confirm; unsigned int sharpen_policy; unsigned int scale_to_sharpness_policy; - bool skip_full_updated_if_possible; unsigned int enable_oled_edp_power_up_opt; bool enable_hblank_borrow; bool force_subvp_df_throttle; @@ -1164,6 +1187,7 @@ struct dc_debug_options { unsigned int auxless_alpm_lfps_t1t2_us; short auxless_alpm_lfps_t1t2_offset_us; bool disable_stutter_for_wm_program; + bool enable_block_sequence_programming; }; @@ -1702,6 +1726,7 @@ struct dc { struct dc_debug_options debug; struct dc_versions versions; struct dc_caps caps; + struct dc_check_config check_config; struct dc_cap_funcs cap_funcs; struct dc_config config; struct dc_bounding_box_overrides bb_overrides; @@ -1830,20 +1855,26 @@ struct dc_surface_update { }; struct dc_underflow_debug_data { - uint32_t otg_inst; - uint32_t otg_underflow; - uint32_t h_position; - uint32_t v_position; - uint32_t otg_frame_count; - struct dc_underflow_per_hubp_debug_data { - uint32_t hubp_underflow; - uint32_t hubp_in_blank; - uint32_t hubp_readline; - uint32_t det_config_error; - } hubps[MAX_PIPES]; - uint32_t curr_det_sizes[MAX_PIPES]; - uint32_t target_det_sizes[MAX_PIPES]; - uint32_t compbuf_config_error; + struct dcn_hubbub_reg_state *hubbub_reg_state; + struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES]; + struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES]; + struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES]; + struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES]; + struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES]; + struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES]; + struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES]; +}; + +struct power_features { + bool ips; + bool rcg; + bool replay; + bool dds; + bool sprs; + bool psr; + bool fams; + bool mpo; + bool uclk_p_state; }; /* @@ -2688,6 +2719,13 @@ 
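/*
 * Editor's note: illustrative sketch, not part of the patch above.  The
 * dc_lock_descriptor values are powers of two, which suggests they are meant
 * to be combined as bit flags inside a surface_update_descriptor; assuming
 * that reading, a caller could record and test several lock scopes at once:
 */
enum lock_desc { LOCK_NONE = 0x0, LOCK_STREAM = 0x1, LOCK_LINK = 0x2, LOCK_GLOBAL = 0x4 };

static int needs_global_lock(unsigned int lock_descriptor)
{
	return (lock_descriptor & LOCK_GLOBAL) != 0;
}

/* e.g. a full update that must hold both the stream and the link lock: */
/* unsigned int locks = LOCK_STREAM | LOCK_LINK; */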
bool dc_process_dmub_aux_transfer_async(struct dc *dc, uint32_t link_index, struct aux_payload *payload); +/* + * smart power OLED Interfaces + */ +bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits, + uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline); +bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL); + /* Get dc link index from dpia port index */ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, uint8_t dpia_port_index); @@ -2721,6 +2759,8 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context); bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index); +void dc_log_preos_dmcub_info(const struct dc *dc); + /* DSC Interfaces */ #include "dc_dsc.h" @@ -2736,7 +2776,7 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream); bool dc_is_cursor_limit_pending(struct dc *dc); -bool dc_can_clear_cursor_limit(struct dc *dc); +bool dc_can_clear_cursor_limit(const struct dc *dc); /** * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data. @@ -2751,4 +2791,493 @@ bool dc_can_clear_cursor_limit(struct dc *dc); */ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data); +void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, struct power_features *out_data); + +/** + * Software state variables used to program register fields across the display pipeline + */ +struct dc_register_software_state { + /* HUBP register programming variables for each pipe */ + struct { + bool valid_plane_state; + bool valid_stream; + bool min_dc_gfx_version9; + uint32_t vtg_sel; /* DCHUBP_CNTL->HUBP_VTG_SEL from pipe_ctx->stream_res.tg->inst */ + uint32_t hubp_clock_enable; /* HUBP_CLK_CNTL->HUBP_CLOCK_ENABLE from power management */ + uint32_t surface_pixel_format; /* DCSURF_SURFACE_CONFIG->SURFACE_PIXEL_FORMAT from plane_state->format */ + uint32_t rotation_angle; /* DCSURF_SURFACE_CONFIG->ROTATION_ANGLE from plane_state->rotation */ + uint32_t h_mirror_en; /* DCSURF_SURFACE_CONFIG->H_MIRROR_EN from plane_state->horizontal_mirror */ + uint32_t surface_dcc_en; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_EN from dcc->enable */ + uint32_t surface_size_width; /* HUBP_SIZE->SURFACE_SIZE_WIDTH from plane_size.surface_size.width */ + uint32_t surface_size_height; /* HUBP_SIZE->SURFACE_SIZE_HEIGHT from plane_size.surface_size.height */ + uint32_t pri_viewport_width; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_WIDTH from scaler_data.viewport.width */ + uint32_t pri_viewport_height; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_HEIGHT from scaler_data.viewport.height */ + uint32_t pri_viewport_x_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_X_START from scaler_data.viewport.x */ + uint32_t pri_viewport_y_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_Y_START from scaler_data.viewport.y */ + uint32_t cursor_enable; /* CURSOR_CONTROL->CURSOR_ENABLE from cursor_attributes.enable */ + uint32_t cursor_width; /* CURSOR_SETTINGS->CURSOR_WIDTH from cursor_position.width */ + uint32_t cursor_height; /* CURSOR_SETTINGS->CURSOR_HEIGHT from cursor_position.height */ + + /* Additional DCC configuration */ + uint32_t surface_dcc_ind_64b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_64B_BLK from dcc.independent_64b_blks */ + uint32_t surface_dcc_ind_128b_blk; /* 
DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_128B_BLK from dcc.independent_128b_blks */ + + /* Surface pitch configuration */ + uint32_t surface_pitch; /* DCSURF_SURFACE_PITCH->PITCH from plane_size.surface_pitch */ + uint32_t meta_pitch; /* DCSURF_SURFACE_PITCH->META_PITCH from dcc.meta_pitch */ + uint32_t chroma_pitch; /* DCSURF_SURFACE_PITCH_C->PITCH_C from plane_size.chroma_pitch */ + uint32_t meta_pitch_c; /* DCSURF_SURFACE_PITCH_C->META_PITCH_C from dcc.meta_pitch_c */ + + /* Surface addresses */ + uint32_t primary_surface_address_low; /* DCSURF_PRIMARY_SURFACE_ADDRESS->PRIMARY_SURFACE_ADDRESS from address.grph.addr.low_part */ + uint32_t primary_surface_address_high; /* DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH->PRIMARY_SURFACE_ADDRESS_HIGH from address.grph.addr.high_part */ + uint32_t primary_meta_surface_address_low; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS->PRIMARY_META_SURFACE_ADDRESS from address.grph.meta_addr.low_part */ + uint32_t primary_meta_surface_address_high; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH->PRIMARY_META_SURFACE_ADDRESS_HIGH from address.grph.meta_addr.high_part */ + + /* TMZ configuration */ + uint32_t primary_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_TMZ from address.tmz_surface */ + uint32_t primary_meta_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_META_SURFACE_TMZ from address.tmz_surface */ + + /* Tiling configuration */ + uint32_t sw_mode; /* DCSURF_TILING_CONFIG->SW_MODE from tiling_info.gfx9.swizzle */ + uint32_t num_pipes; /* DCSURF_ADDR_CONFIG->NUM_PIPES from tiling_info.gfx9.num_pipes */ + uint32_t num_banks; /* DCSURF_ADDR_CONFIG->NUM_BANKS from tiling_info.gfx9.num_banks */ + uint32_t pipe_interleave; /* DCSURF_ADDR_CONFIG->PIPE_INTERLEAVE from tiling_info.gfx9.pipe_interleave */ + uint32_t num_shader_engines; /* DCSURF_ADDR_CONFIG->NUM_SE from tiling_info.gfx9.num_shader_engines */ + uint32_t num_rb_per_se; /* DCSURF_ADDR_CONFIG->NUM_RB_PER_SE from tiling_info.gfx9.num_rb_per_se */ + uint32_t num_pkrs; /* DCSURF_ADDR_CONFIG->NUM_PKRS from tiling_info.gfx9.num_pkrs */ + + /* DML Request Size Configuration - Luma */ + uint32_t rq_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->CHUNK_SIZE from rq_regs.rq_regs_l.chunk_size */ + uint32_t rq_min_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_CHUNK_SIZE from rq_regs.rq_regs_l.min_chunk_size */ + uint32_t rq_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->META_CHUNK_SIZE from rq_regs.rq_regs_l.meta_chunk_size */ + uint32_t rq_min_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_META_CHUNK_SIZE from rq_regs.rq_regs_l.min_meta_chunk_size */ + uint32_t rq_dpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->DPTE_GROUP_SIZE from rq_regs.rq_regs_l.dpte_group_size */ + uint32_t rq_mpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->MPTE_GROUP_SIZE from rq_regs.rq_regs_l.mpte_group_size */ + uint32_t rq_swath_height_l; /* DCHUBP_REQ_SIZE_CONFIG->SWATH_HEIGHT_L from rq_regs.rq_regs_l.swath_height */ + uint32_t rq_pte_row_height_l; /* DCHUBP_REQ_SIZE_CONFIG->PTE_ROW_HEIGHT_L from rq_regs.rq_regs_l.pte_row_height */ + + /* DML Request Size Configuration - Chroma */ + uint32_t rq_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->CHUNK_SIZE_C from rq_regs.rq_regs_c.chunk_size */ + uint32_t rq_min_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_chunk_size */ + uint32_t rq_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->META_CHUNK_SIZE_C from rq_regs.rq_regs_c.meta_chunk_size */ + uint32_t rq_min_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_META_CHUNK_SIZE_C from 
rq_regs.rq_regs_c.min_meta_chunk_size */ + uint32_t rq_dpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->DPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.dpte_group_size */ + uint32_t rq_mpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.mpte_group_size */ + uint32_t rq_swath_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->SWATH_HEIGHT_C from rq_regs.rq_regs_c.swath_height */ + uint32_t rq_pte_row_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->PTE_ROW_HEIGHT_C from rq_regs.rq_regs_c.pte_row_height */ + + /* DML Expansion Modes */ + uint32_t drq_expansion_mode; /* DCN_EXPANSION_MODE->DRQ_EXPANSION_MODE from rq_regs.drq_expansion_mode */ + uint32_t prq_expansion_mode; /* DCN_EXPANSION_MODE->PRQ_EXPANSION_MODE from rq_regs.prq_expansion_mode */ + uint32_t mrq_expansion_mode; /* DCN_EXPANSION_MODE->MRQ_EXPANSION_MODE from rq_regs.mrq_expansion_mode */ + uint32_t crq_expansion_mode; /* DCN_EXPANSION_MODE->CRQ_EXPANSION_MODE from rq_regs.crq_expansion_mode */ + + /* DML DLG parameters - nominal */ + uint32_t dst_y_per_vm_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_VM_VBLANK from dlg_regs.dst_y_per_vm_vblank */ + uint32_t dst_y_per_row_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_ROW_VBLANK from dlg_regs.dst_y_per_row_vblank */ + uint32_t dst_y_per_vm_flip; /* NOM_PARAMETERS_1->DST_Y_PER_VM_FLIP from dlg_regs.dst_y_per_vm_flip */ + uint32_t dst_y_per_row_flip; /* NOM_PARAMETERS_1->DST_Y_PER_ROW_FLIP from dlg_regs.dst_y_per_row_flip */ + + /* DML prefetch settings */ + uint32_t dst_y_prefetch; /* PREFETCH_SETTINS->DST_Y_PREFETCH from dlg_regs.dst_y_prefetch */ + uint32_t vratio_prefetch; /* PREFETCH_SETTINS->VRATIO_PREFETCH from dlg_regs.vratio_prefetch */ + uint32_t vratio_prefetch_c; /* PREFETCH_SETTINS_C->VRATIO_PREFETCH_C from dlg_regs.vratio_prefetch_c */ + + /* TTU parameters */ + uint32_t qos_level_low_wm; /* TTU_CNTL1->QoSLevelLowWaterMark from ttu_regs.qos_level_low_wm */ + uint32_t qos_level_high_wm; /* TTU_CNTL1->QoSLevelHighWaterMark from ttu_regs.qos_level_high_wm */ + uint32_t qos_level_flip; /* TTU_CNTL2->QoS_LEVEL_FLIP_L from ttu_regs.qos_level_flip */ + uint32_t min_ttu_vblank; /* DCN_GLOBAL_TTU_CNTL->MIN_TTU_VBLANK from ttu_regs.min_ttu_vblank */ + } hubp[MAX_PIPES]; + + /* HUBBUB register programming variables */ + struct { + /* Individual DET buffer control per pipe - software state that programs DET registers */ + uint32_t det0_size; /* DCHUBBUB_DET0_CTRL->DET0_SIZE from hubbub->funcs->program_det_size(hubbub, 0, det_buffer_size_kb) */ + uint32_t det1_size; /* DCHUBBUB_DET1_CTRL->DET1_SIZE from hubbub->funcs->program_det_size(hubbub, 1, det_buffer_size_kb) */ + uint32_t det2_size; /* DCHUBBUB_DET2_CTRL->DET2_SIZE from hubbub->funcs->program_det_size(hubbub, 2, det_buffer_size_kb) */ + uint32_t det3_size; /* DCHUBBUB_DET3_CTRL->DET3_SIZE from hubbub->funcs->program_det_size(hubbub, 3, det_buffer_size_kb) */ + + /* Compression buffer control - software state that programs COMPBUF registers */ + uint32_t compbuf_size; /* DCHUBBUB_COMPBUF_CTRL->COMPBUF_SIZE from hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, safe_to_increase) */ + uint32_t compbuf_reserved_space_64b; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_64B from hubbub2->pixel_chunk_size / 32 */ + uint32_t compbuf_reserved_space_zs; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_ZS from hubbub2->pixel_chunk_size / 128 */ + } hubbub; + + /* DPP register programming variables for each pipe (simplified for available fields) */ + struct { + uint32_t dpp_clock_enable; /* DPP_CONTROL->DPP_CLOCK_ENABLE from 
dppclk_enable */ + + /* Recout (Rectangle of Interest) configuration */ + uint32_t recout_start_x; /* RECOUT_START->RECOUT_START_X from pipe_ctx->plane_res.scl_data.recout.x */ + uint32_t recout_start_y; /* RECOUT_START->RECOUT_START_Y from pipe_ctx->plane_res.scl_data.recout.y */ + uint32_t recout_width; /* RECOUT_SIZE->RECOUT_WIDTH from pipe_ctx->plane_res.scl_data.recout.width */ + uint32_t recout_height; /* RECOUT_SIZE->RECOUT_HEIGHT from pipe_ctx->plane_res.scl_data.recout.height */ + + /* MPC (Multiple Pipe/Plane Combiner) size configuration */ + uint32_t mpc_width; /* MPC_SIZE->MPC_WIDTH from pipe_ctx->plane_res.scl_data.h_active */ + uint32_t mpc_height; /* MPC_SIZE->MPC_HEIGHT from pipe_ctx->plane_res.scl_data.v_active */ + + /* DSCL mode configuration */ + uint32_t dscl_mode; /* SCL_MODE->DSCL_MODE from pipe_ctx->plane_res.scl_data.dscl_prog_data.dscl_mode */ + + /* Scaler ratios (simplified to integer parts) */ + uint32_t horz_ratio_int; /* SCL_HORZ_FILTER_SCALE_RATIO->SCL_H_SCALE_RATIO integer part from ratios.horz */ + uint32_t vert_ratio_int; /* SCL_VERT_FILTER_SCALE_RATIO->SCL_V_SCALE_RATIO integer part from ratios.vert */ + + /* Basic scaler taps */ + uint32_t h_taps; /* SCL_TAP_CONTROL->SCL_H_NUM_TAPS from taps.h_taps */ + uint32_t v_taps; /* SCL_TAP_CONTROL->SCL_V_NUM_TAPS from taps.v_taps */ + } dpp[MAX_PIPES]; + + /* DCCG register programming variables */ + struct { + /* Core Display Clock Control */ + uint32_t dispclk_khz; /* DENTIST_DISPCLK_CNTL->DENTIST_DISPCLK_WDIVIDER from clk_mgr.dispclk_khz */ + uint32_t dc_mem_global_pwr_req_dis; /* DC_MEM_GLOBAL_PWR_REQ_CNTL->DC_MEM_GLOBAL_PWR_REQ_DIS from memory power management settings */ + + /* DPP Clock Control - 4 fields per pipe */ + uint32_t dppclk_khz[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK_R_GATE_DISABLE from dpp_clocks[pipe] */ + uint32_t dppclk_enable[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK0_EN,DPPCLK1_EN,DPPCLK2_EN,DPPCLK3_EN from dccg31_update_dpp_dto() */ + uint32_t dppclk_dto_enable[MAX_PIPES]; /* DPPCLK_DTO_CTRL->DPPCLK_DTO_ENABLE from dccg->dpp_clock_gated[dpp_inst] state */ + uint32_t dppclk_dto_phase[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_PHASE from phase calculation req_dppclk/ref_dppclk */ + uint32_t dppclk_dto_modulo[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_MODULO from modulo = 0xff */ + + /* DSC Clock Control - 4 fields per DSC resource */ + uint32_t dscclk_khz[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK_DTO_ENABLE from dsc_clocks */ + uint32_t dscclk_dto_enable[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK0_DTO_ENABLE,DSCCLK1_DTO_ENABLE,DSCCLK2_DTO_ENABLE,DSCCLK3_DTO_ENABLE */ + uint32_t dscclk_dto_phase[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_PHASE from dccg31_enable_dscclk() */ + uint32_t dscclk_dto_modulo[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_MODULO from dccg31_enable_dscclk() */ + + /* Pixel Clock Control - per pipe */ + uint32_t pixclk_khz[MAX_PIPES]; /* PIXCLK_RESYNC_CNTL->PIXCLK_RESYNC_ENABLE from stream.timing.pix_clk_100hz */ + uint32_t otg_pixel_rate_div[MAX_PIPES]; /* OTG_PIXEL_RATE_DIV->OTG_PIXEL_RATE_DIV from OTG pixel rate divider control */ + uint32_t dtbclk_dto_enable[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_ENABLE from dccg31_set_dtbclk_dto() */ + uint32_t pipe_dto_src_sel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->PIPE_DTO_SRC_SEL from dccg31_set_dtbclk_dto() source selection */ + uint32_t dtbclk_dto_div[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_DIV from dtbdto_div calculation */ + uint32_t otg_add_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_ADD_PIXEL from 
dccg31_otg_add_pixel() */ + uint32_t otg_drop_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_DROP_PIXEL from dccg31_otg_drop_pixel() */ + + /* DTBCLK DTO Control - 4 DTOs */ + uint32_t dtbclk_dto_modulo[4]; /* DTBCLK_DTO0_MODULO->DTBCLK_DTO0_MODULO from dccg31_set_dtbclk_dto() modulo calculation */ + uint32_t dtbclk_dto_phase[4]; /* DTBCLK_DTO0_PHASE->DTBCLK_DTO0_PHASE from phase calculation pixclk_khz/ref_dtbclk_khz */ + uint32_t dtbclk_dto_dbuf_en; /* DTBCLK_DTO_DBUF_EN->DTBCLK DTO data buffer enable */ + + /* DP Stream Clock Control - 4 pipes */ + uint32_t dpstreamclk_enable[MAX_PIPES]; /* DPSTREAMCLK_CNTL->DPSTREAMCLK_PIPE0_EN,DPSTREAMCLK_PIPE1_EN,DPSTREAMCLK_PIPE2_EN,DPSTREAMCLK_PIPE3_EN */ + uint32_t dp_dto_modulo[4]; /* DP_DTO0_MODULO->DP_DTO0_MODULO from DP stream DTO programming */ + uint32_t dp_dto_phase[4]; /* DP_DTO0_PHASE->DP_DTO0_PHASE from DP stream DTO programming */ + uint32_t dp_dto_dbuf_en; /* DP_DTO_DBUF_EN->DP DTO data buffer enable */ + + /* PHY Symbol Clock Control - 5 PHYs (A,B,C,D,E) */ + uint32_t phy_symclk_force_en[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_EN from dccg31_set_physymclk() force_enable */ + uint32_t phy_symclk_force_src_sel[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_SRC_SEL from dccg31_set_physymclk() clk_src */ + uint32_t phy_symclk_gate_disable[5]; /* DCCG_GATE_DISABLE_CNTL2->PHYASYMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.physymclk */ + + /* SYMCLK32 SE Control - 4 instances */ + uint32_t symclk32_se_src_sel[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_SRC_SEL from dccg31_enable_symclk32_se() with get_phy_mux_symclk() mapping */ + uint32_t symclk32_se_enable[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_EN from dccg31_enable_symclk32_se() enable */ + uint32_t symclk32_se_gate_disable[4]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_SE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_se */ + + /* SYMCLK32 LE Control - 2 instances */ + uint32_t symclk32_le_src_sel[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_SRC_SEL from dccg31_enable_symclk32_le() phyd32clk source */ + uint32_t symclk32_le_enable[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_EN from dccg31_enable_symclk32_le() enable */ + uint32_t symclk32_le_gate_disable[2]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_LE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_le */ + + /* DPIA Clock Control */ + uint32_t dpiaclk_540m_dto_modulo; /* DPIACLK_540M_DTO_MODULO->DPIA 540MHz DTO modulo */ + uint32_t dpiaclk_540m_dto_phase; /* DPIACLK_540M_DTO_PHASE->DPIA 540MHz DTO phase */ + uint32_t dpiaclk_810m_dto_modulo; /* DPIACLK_810M_DTO_MODULO->DPIA 810MHz DTO modulo */ + uint32_t dpiaclk_810m_dto_phase; /* DPIACLK_810M_DTO_PHASE->DPIA 810MHz DTO phase */ + uint32_t dpiaclk_dto_cntl; /* DPIACLK_DTO_CNTL->DPIA clock DTO control */ + uint32_t dpiasymclk_cntl; /* DPIASYMCLK_CNTL->DPIA symbol clock control */ + + /* Clock Gating Control */ + uint32_t dccg_gate_disable_cntl; /* DCCG_GATE_DISABLE_CNTL->Clock gate disable control from dccg31_init() */ + uint32_t dpstreamclk_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */ + uint32_t dpstreamclk_root_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_ROOT_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */ + + /* VSync Control */ + uint32_t vsync_cnt_ctrl; /* DCCG_VSYNC_CNT_CTRL->VSync counter control */ + uint32_t vsync_cnt_int_ctrl; /* DCCG_VSYNC_CNT_INT_CTRL->VSync counter interrupt control */ + uint32_t vsync_otg_latch_value[6]; /* 
DCCG_VSYNC_OTG0_LATCH_VALUE->OTG0 VSync latch value (for OTG0-5) */ + + /* Time Base Control */ + uint32_t microsecond_time_base_div; /* MICROSECOND_TIME_BASE_DIV->Microsecond time base divider */ + uint32_t millisecond_time_base_div; /* MILLISECOND_TIME_BASE_DIV->Millisecond time base divider */ + } dccg; + + /* DSC essential configuration for underflow analysis */ + struct { + /* DSC active state - critical for bandwidth analysis */ + uint32_t dsc_clock_enable; /* DSC enabled - affects bandwidth requirements */ + + /* DSC configuration affecting bandwidth and timing */ + uint32_t dsc_num_slices_h; /* Horizontal slice count - affects throughput */ + uint32_t dsc_num_slices_v; /* Vertical slice count - affects throughput */ + uint32_t dsc_bits_per_pixel; /* Compression ratio - affects bandwidth */ + + /* OPP integration - affects pipeline flow */ + uint32_t dscrm_dsc_forward_enable; /* DSC forwarding to OPP enabled */ + uint32_t dscrm_dsc_opp_pipe_source; /* Which OPP receives DSC output */ + } dsc[MAX_PIPES]; + + /* MPC register programming variables */ + struct { + /* MPCC blending tree and mode control */ + uint32_t mpcc_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_MODE from blend_cfg.blend_mode */ + uint32_t mpcc_alpha_blend_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_BLND_MODE from blend_cfg.alpha_mode */ + uint32_t mpcc_alpha_multiplied_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_MULTIPLIED_MODE from blend_cfg.pre_multiplied_alpha */ + uint32_t mpcc_blnd_active_overlap_only[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BLND_ACTIVE_OVERLAP_ONLY from blend_cfg.overlap_only */ + uint32_t mpcc_global_alpha[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_ALPHA from blend_cfg.global_alpha */ + uint32_t mpcc_global_gain[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_GAIN from blend_cfg.global_gain */ + uint32_t mpcc_bg_bpc[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BG_BPC from background color depth */ + uint32_t mpcc_bot_gain_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BOT_GAIN_MODE from bottom layer gain control */ + + /* MPCC blending tree connections */ + uint32_t mpcc_bot_sel[MAX_PIPES]; /* MPCC_BOT_SEL->MPCC_BOT_SEL from mpcc_state->bot_sel */ + uint32_t mpcc_top_sel[MAX_PIPES]; /* MPCC_TOP_SEL->MPCC_TOP_SEL from mpcc_state->dpp_id */ + + /* MPCC output gamma control */ + uint32_t mpcc_ogam_mode[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_MODE from output gamma mode */ + uint32_t mpcc_ogam_select[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_SELECT from gamma LUT bank selection */ + uint32_t mpcc_ogam_pwl_disable[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_PWL_DISABLE from PWL control */ + + /* MPCC pipe assignment and status */ + uint32_t mpcc_opp_id[MAX_PIPES]; /* MPCC_OPP_ID->MPCC_OPP_ID from mpcc_state->opp_id */ + uint32_t mpcc_idle[MAX_PIPES]; /* MPCC_STATUS->MPCC_IDLE from mpcc idle status */ + uint32_t mpcc_busy[MAX_PIPES]; /* MPCC_STATUS->MPCC_BUSY from mpcc busy status */ + + /* MPC output processing */ + uint32_t mpc_out_csc_mode; /* MPC_OUT_CSC_COEF->MPC_OUT_CSC_MODE from output_csc */ + uint32_t mpc_out_gamma_mode; /* MPC_OUT_GAMMA_LUT->MPC_OUT_GAMMA_MODE from output_gamma */ + } mpc; + + /* OPP register programming variables for each pipe */ + struct { + /* Display Pattern Generator (DPG) Control - 19 fields from DPG_CONTROL register */ + uint32_t dpg_enable; /* DPG_CONTROL->DPG_EN from test_pattern parameter (enable/disable) */ + + /* Format Control (FMT) - 18 fields from FMT_CONTROL register */ + uint32_t fmt_pixel_encoding; /* FMT_CONTROL->FMT_PIXEL_ENCODING from clamping->pixel_encoding */ + uint32_t 
fmt_subsampling_mode; /* FMT_CONTROL->FMT_SUBSAMPLING_MODE from force_chroma_subsampling_1tap */ + uint32_t fmt_cbcr_bit_reduction_bypass; /* FMT_CONTROL->FMT_CBCR_BIT_REDUCTION_BYPASS from pixel_encoding bypass control */ + uint32_t fmt_stereosync_override; /* FMT_CONTROL->FMT_STEREOSYNC_OVERRIDE from stereo timing override */ + uint32_t fmt_spatial_dither_frame_counter_max; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX from fmt_bit_depth->flags */ + uint32_t fmt_spatial_dither_frame_counter_bit_swap; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP from dither control */ + uint32_t fmt_truncate_enable; /* FMT_CONTROL->FMT_TRUNCATE_EN from fmt_bit_depth->flags.TRUNCATE_ENABLED */ + uint32_t fmt_truncate_depth; /* FMT_CONTROL->FMT_TRUNCATE_DEPTH from fmt_bit_depth->flags.TRUNCATE_DEPTH */ + uint32_t fmt_truncate_mode; /* FMT_CONTROL->FMT_TRUNCATE_MODE from fmt_bit_depth->flags.TRUNCATE_MODE */ + uint32_t fmt_spatial_dither_enable; /* FMT_CONTROL->FMT_SPATIAL_DITHER_EN from fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED */ + uint32_t fmt_spatial_dither_mode; /* FMT_CONTROL->FMT_SPATIAL_DITHER_MODE from fmt_bit_depth->flags.SPATIAL_DITHER_MODE */ + uint32_t fmt_spatial_dither_depth; /* FMT_CONTROL->FMT_SPATIAL_DITHER_DEPTH from fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH */ + uint32_t fmt_temporal_dither_enable; /* FMT_CONTROL->FMT_TEMPORAL_DITHER_EN from fmt_bit_depth->flags.TEMPORAL_DITHER_ENABLED */ + uint32_t fmt_clamp_data_enable; /* FMT_CONTROL->FMT_CLAMP_DATA_EN from clamping->clamping_range enable */ + uint32_t fmt_clamp_color_format; /* FMT_CONTROL->FMT_CLAMP_COLOR_FORMAT from clamping->color_format */ + uint32_t fmt_dynamic_exp_enable; /* FMT_CONTROL->FMT_DYNAMIC_EXP_EN from color_sp/color_dpth/signal */ + uint32_t fmt_dynamic_exp_mode; /* FMT_CONTROL->FMT_DYNAMIC_EXP_MODE from color space mode mapping */ + uint32_t fmt_bit_depth_control; /* Legacy field - kept for compatibility */ + + /* OPP Pipe Control - 1 field from OPP_PIPE_CONTROL register */ + uint32_t opp_pipe_clock_enable; /* OPP_PIPE_CONTROL->OPP_PIPE_CLOCK_EN from enable parameter (bool) */ + + /* OPP CRC Control - 3 fields from OPP_PIPE_CRC_CONTROL register */ + uint32_t opp_crc_enable; /* OPP_PIPE_CRC_CONTROL->CRC_EN from CRC enable control */ + uint32_t opp_crc_select_source; /* OPP_PIPE_CRC_CONTROL->CRC_SELECT_SOURCE from CRC source selection */ + uint32_t opp_crc_stereo_cont; /* OPP_PIPE_CRC_CONTROL->CRC_STEREO_CONT from stereo continuous CRC */ + + /* Output Buffer (OPPBUF) Control - 6 fields from OPPBUF_CONTROL register */ + uint32_t oppbuf_active_width; /* OPPBUF_CONTROL->OPPBUF_ACTIVE_WIDTH from oppbuf_params->active_width */ + uint32_t oppbuf_pixel_repetition; /* OPPBUF_CONTROL->OPPBUF_PIXEL_REPETITION from oppbuf_params->pixel_repetition */ + uint32_t oppbuf_display_segmentation; /* OPPBUF_CONTROL->OPPBUF_DISPLAY_SEGMENTATION from oppbuf_params->mso_segmentation */ + uint32_t oppbuf_overlap_pixel_num; /* OPPBUF_CONTROL->OPPBUF_OVERLAP_PIXEL_NUM from oppbuf_params->mso_overlap_pixel_num */ + uint32_t oppbuf_3d_vact_space1_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE1_SIZE from 3D timing space1_size */ + uint32_t oppbuf_3d_vact_space2_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE2_SIZE from 3D timing space2_size */ + + /* DSC Forward Config - 3 fields from DSCRM_DSC_FORWARD_CONFIG register */ + uint32_t dscrm_dsc_forward_enable; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN from DSC forward enable control */ + uint32_t dscrm_dsc_opp_pipe_source; /* 
DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_OPP_PIPE_SOURCE from opp_pipe parameter */ + uint32_t dscrm_dsc_forward_enable_status; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN_STATUS from DSC forward status (read-only) */ + } opp[MAX_PIPES]; + + /* OPTC register programming variables for each pipe */ + struct { + uint32_t otg_master_inst; + + /* OTG_CONTROL register - 5 fields for OTG control */ + uint32_t otg_master_enable; /* OTG_CONTROL->OTG_MASTER_EN from timing enable/disable control */ + uint32_t otg_disable_point_cntl; /* OTG_CONTROL->OTG_DISABLE_POINT_CNTL from disable timing control */ + uint32_t otg_start_point_cntl; /* OTG_CONTROL->OTG_START_POINT_CNTL from start timing control */ + uint32_t otg_field_number_cntl; /* OTG_CONTROL->OTG_FIELD_NUMBER_CNTL from interlace field control */ + uint32_t otg_out_mux; /* OTG_CONTROL->OTG_OUT_MUX from output mux selection */ + + /* OTG Horizontal Timing - 7 fields */ + uint32_t otg_h_total; /* OTG_H_TOTAL->OTG_H_TOTAL from dc_crtc_timing->h_total */ + uint32_t otg_h_blank_start; /* OTG_H_BLANK_START_END->OTG_H_BLANK_START from dc_crtc_timing->h_front_porch */ + uint32_t otg_h_blank_end; /* OTG_H_BLANK_START_END->OTG_H_BLANK_END from dc_crtc_timing->h_addressable_video_pixel_width */ + uint32_t otg_h_sync_start; /* OTG_H_SYNC_A->OTG_H_SYNC_A_START from dc_crtc_timing->h_sync_width */ + uint32_t otg_h_sync_end; /* OTG_H_SYNC_A->OTG_H_SYNC_A_END from calculated sync end position */ + uint32_t otg_h_sync_polarity; /* OTG_H_SYNC_A_CNTL->OTG_H_SYNC_A_POL from dc_crtc_timing->flags.HSYNC_POSITIVE_POLARITY */ + uint32_t otg_h_timing_div_mode; /* OTG_H_TIMING_CNTL->OTG_H_TIMING_DIV_MODE from horizontal timing division mode */ + + /* OTG Vertical Timing - 7 fields */ + uint32_t otg_v_total; /* OTG_V_TOTAL->OTG_V_TOTAL from dc_crtc_timing->v_total */ + uint32_t otg_v_blank_start; /* OTG_V_BLANK_START_END->OTG_V_BLANK_START from dc_crtc_timing->v_front_porch */ + uint32_t otg_v_blank_end; /* OTG_V_BLANK_START_END->OTG_V_BLANK_END from dc_crtc_timing->v_addressable_video_line_width */ + uint32_t otg_v_sync_start; /* OTG_V_SYNC_A->OTG_V_SYNC_A_START from dc_crtc_timing->v_sync_width */ + uint32_t otg_v_sync_end; /* OTG_V_SYNC_A->OTG_V_SYNC_A_END from calculated sync end position */ + uint32_t otg_v_sync_polarity; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_A_POL from dc_crtc_timing->flags.VSYNC_POSITIVE_POLARITY */ + uint32_t otg_v_sync_mode; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_MODE from sync mode selection */ + + /* OTG DRR (Dynamic Refresh Rate) Control - 8 fields */ + uint32_t otg_v_total_max; /* OTG_V_TOTAL_MAX->OTG_V_TOTAL_MAX from drr_params->vertical_total_max */ + uint32_t otg_v_total_min; /* OTG_V_TOTAL_MIN->OTG_V_TOTAL_MIN from drr_params->vertical_total_min */ + uint32_t otg_v_total_mid; /* OTG_V_TOTAL_MID->OTG_V_TOTAL_MID from drr_params->vertical_total_mid */ + uint32_t otg_v_total_max_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MAX_SEL from DRR max selection enable */ + uint32_t otg_v_total_min_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MIN_SEL from DRR min selection enable */ + uint32_t otg_vtotal_mid_replacing_max_en; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_REPLACING_MAX_EN from DRR mid-frame enable */ + uint32_t otg_vtotal_mid_frame_num; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_FRAME_NUM from drr_params->vertical_total_mid_frame_num */ + uint32_t otg_set_v_total_min_mask; /* OTG_V_TOTAL_CONTROL->OTG_SET_V_TOTAL_MIN_MASK from DRR trigger mask */ + uint32_t otg_force_lock_on_event; /* OTG_V_TOTAL_CONTROL->OTG_FORCE_LOCK_ON_EVENT from DRR force lock control */ + + 
/* OPTC Data Source and ODM - 6 fields */ + uint32_t optc_seg0_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG0_SRC_SEL from opp_id[0] ODM segment 0 source */ + uint32_t optc_seg1_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG1_SRC_SEL from opp_id[1] ODM segment 1 source */ + uint32_t optc_seg2_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG2_SRC_SEL from opp_id[2] ODM segment 2 source */ + uint32_t optc_seg3_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG3_SRC_SEL from opp_id[3] ODM segment 3 source */ + uint32_t optc_num_of_input_segment; /* OPTC_DATA_SOURCE_SELECT->OPTC_NUM_OF_INPUT_SEGMENT from opp_cnt-1 number of input segments */ + uint32_t optc_mem_sel; /* OPTC_MEMORY_CONFIG->OPTC_MEM_SEL from memory_mask ODM memory selection */ + + /* OPTC Data Format and DSC - 4 fields */ + uint32_t optc_data_format; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DATA_FORMAT from data format selection */ + uint32_t optc_dsc_mode; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DSC_MODE from dsc_mode parameter */ + uint32_t optc_dsc_bytes_per_pixel; /* OPTC_BYTES_PER_PIXEL->OPTC_DSC_BYTES_PER_PIXEL from dsc_bytes_per_pixel parameter */ + uint32_t optc_segment_width; /* OPTC_WIDTH_CONTROL->OPTC_SEGMENT_WIDTH from segment_width parameter */ + uint32_t optc_dsc_slice_width; /* OPTC_WIDTH_CONTROL->OPTC_DSC_SLICE_WIDTH from dsc_slice_width parameter */ + + /* OPTC Clock and Underflow Control - 4 fields */ + uint32_t optc_input_pix_clk_en; /* OPTC_INPUT_CLOCK_CONTROL->OPTC_INPUT_PIX_CLK_EN from pixel clock enable */ + uint32_t optc_underflow_occurred_status; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_OCCURRED_STATUS from underflow status (read-only) */ + uint32_t optc_underflow_clear; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_CLEAR from underflow clear control */ + uint32_t otg_clock_enable; /* OTG_CLOCK_CONTROL->OTG_CLOCK_EN from OTG clock enable */ + uint32_t otg_clock_gate_dis; /* OTG_CLOCK_CONTROL->OTG_CLOCK_GATE_DIS from clock gate disable */ + + /* OTG Stereo and 3D Control - 6 fields */ + uint32_t otg_stereo_enable; /* OTG_STEREO_CONTROL->OTG_STEREO_EN from stereo enable control */ + uint32_t otg_stereo_sync_output_line_num; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_LINE_NUM from timing->stereo_3d_format line num */ + uint32_t otg_stereo_sync_output_polarity; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_POLARITY from stereo polarity control */ + uint32_t otg_3d_structure_en; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_EN from 3D structure enable */ + uint32_t otg_3d_structure_v_update_mode; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_V_UPDATE_MODE from 3D vertical update mode */ + uint32_t otg_3d_structure_stereo_sel_ovr; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_STEREO_SEL_OVR from 3D stereo selection override */ + uint32_t otg_interlace_enable; /* OTG_INTERLACE_CONTROL->OTG_INTERLACE_ENABLE from dc_crtc_timing->flags.INTERLACE */ + + /* OTG GSL (Global Sync Lock) Control - 5 fields */ + uint32_t otg_gsl0_en; /* OTG_GSL_CONTROL->OTG_GSL0_EN from GSL group 0 enable */ + uint32_t otg_gsl1_en; /* OTG_GSL_CONTROL->OTG_GSL1_EN from GSL group 1 enable */ + uint32_t otg_gsl2_en; /* OTG_GSL_CONTROL->OTG_GSL2_EN from GSL group 2 enable */ + uint32_t otg_gsl_master_en; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_EN from GSL master enable */ + uint32_t otg_gsl_master_mode; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_MODE from gsl_params->gsl_master mode */ + + /* OTG DRR Advanced Control - 4 fields */ + uint32_t otg_v_total_last_used_by_drr; /* OTG_DRR_CONTROL->OTG_V_TOTAL_LAST_USED_BY_DRR from last used DRR V_TOTAL (read-only) */ + 
uint32_t otg_drr_trigger_window_start_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_START_X from window_start parameter */ + uint32_t otg_drr_trigger_window_end_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_END_X from window_end parameter */ + uint32_t otg_drr_v_total_change_limit; /* OTG_DRR_V_TOTAL_CHANGE->OTG_DRR_V_TOTAL_CHANGE_LIMIT from limit parameter */ + + /* OTG DSC Position Control - 2 fields */ + uint32_t otg_dsc_start_position_x; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_X from DSC start X position */ + uint32_t otg_dsc_start_position_line_num; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_LINE_NUM from DSC start line number */ + + /* OTG Double Buffer Control - 2 fields */ + uint32_t otg_drr_timing_dbuf_update_mode; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_DRR_TIMING_DBUF_UPDATE_MODE from DRR double buffer mode */ + uint32_t otg_blank_data_double_buffer_en; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_BLANK_DATA_DOUBLE_BUFFER_EN from blank data double buffer enable */ + + /* OTG Vertical Interrupts - 6 fields */ + uint32_t otg_vertical_interrupt0_int_enable; /* OTG_VERTICAL_INTERRUPT0_CONTROL->OTG_VERTICAL_INTERRUPT0_INT_ENABLE from interrupt 0 enable */ + uint32_t otg_vertical_interrupt0_line_start; /* OTG_VERTICAL_INTERRUPT0_POSITION->OTG_VERTICAL_INTERRUPT0_LINE_START from start_line parameter */ + uint32_t otg_vertical_interrupt1_int_enable; /* OTG_VERTICAL_INTERRUPT1_CONTROL->OTG_VERTICAL_INTERRUPT1_INT_ENABLE from interrupt 1 enable */ + uint32_t otg_vertical_interrupt1_line_start; /* OTG_VERTICAL_INTERRUPT1_POSITION->OTG_VERTICAL_INTERRUPT1_LINE_START from start_line parameter */ + uint32_t otg_vertical_interrupt2_int_enable; /* OTG_VERTICAL_INTERRUPT2_CONTROL->OTG_VERTICAL_INTERRUPT2_INT_ENABLE from interrupt 2 enable */ + uint32_t otg_vertical_interrupt2_line_start; /* OTG_VERTICAL_INTERRUPT2_POSITION->OTG_VERTICAL_INTERRUPT2_LINE_START from start_line parameter */ + + /* OTG Global Sync Parameters - 6 fields */ + uint32_t otg_vready_offset; /* OTG_VREADY_PARAM->OTG_VREADY_OFFSET from vready_offset parameter */ + uint32_t otg_vstartup_start; /* OTG_VSTARTUP_PARAM->OTG_VSTARTUP_START from vstartup_start parameter */ + uint32_t otg_vupdate_offset; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_OFFSET from vupdate_offset parameter */ + uint32_t otg_vupdate_width; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_WIDTH from vupdate_width parameter */ + uint32_t master_update_lock_vupdate_keepout_start_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET from pstate_keepout start */ + uint32_t master_update_lock_vupdate_keepout_end_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET from pstate_keepout end */ + + /* OTG Manual Trigger Control - 11 fields */ + uint32_t otg_triga_source_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_SELECT from trigger A source selection */ + uint32_t otg_triga_source_pipe_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_PIPE_SELECT from trigger A pipe selection */ + uint32_t otg_triga_rising_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_RISING_EDGE_DETECT_CNTL from trigger A rising edge detect */ + uint32_t otg_triga_falling_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_FALLING_EDGE_DETECT_CNTL from trigger A falling edge detect */ + uint32_t otg_triga_polarity_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_POLARITY_SELECT from trigger A polarity selection */ + uint32_t otg_triga_frequency_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_FREQUENCY_SELECT from trigger A frequency selection */ + uint32_t otg_triga_delay; /* 
OTG_TRIGA_CNTL->OTG_TRIGA_DELAY from trigger A delay */ + uint32_t otg_triga_clear; /* OTG_TRIGA_CNTL->OTG_TRIGA_CLEAR from trigger A clear */ + uint32_t otg_triga_manual_trig; /* OTG_TRIGA_MANUAL_TRIG->OTG_TRIGA_MANUAL_TRIG from manual trigger A */ + uint32_t otg_trigb_source_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_SOURCE_SELECT from trigger B source selection */ + uint32_t otg_trigb_polarity_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_POLARITY_SELECT from trigger B polarity selection */ + uint32_t otg_trigb_manual_trig; /* OTG_TRIGB_MANUAL_TRIG->OTG_TRIGB_MANUAL_TRIG from manual trigger B */ + + /* OTG Static Screen and Update Control - 6 fields */ + uint32_t otg_static_screen_event_mask; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_EVENT_MASK from event_triggers parameter */ + uint32_t otg_static_screen_frame_count; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_FRAME_COUNT from num_frames parameter */ + uint32_t master_update_lock; /* OTG_MASTER_UPDATE_LOCK->MASTER_UPDATE_LOCK from update lock control */ + uint32_t master_update_mode; /* OTG_MASTER_UPDATE_MODE->MASTER_UPDATE_MODE from update mode selection */ + uint32_t otg_force_count_now_mode; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_MODE from force count mode */ + uint32_t otg_force_count_now_clear; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_CLEAR from force count clear */ + + /* VTG Control - 3 fields */ + uint32_t vtg0_enable; /* CONTROL->VTG0_ENABLE from VTG enable control */ + uint32_t vtg0_fp2; /* CONTROL->VTG0_FP2 from VTG front porch 2 */ + uint32_t vtg0_vcount_init; /* CONTROL->VTG0_VCOUNT_INIT from VTG vertical count init */ + + /* OTG Status (Read-Only) - 12 fields */ + uint32_t otg_v_blank; /* OTG_STATUS->OTG_V_BLANK from vertical blank status (read-only) */ + uint32_t otg_v_active_disp; /* OTG_STATUS->OTG_V_ACTIVE_DISP from vertical active display (read-only) */ + uint32_t otg_frame_count; /* OTG_STATUS_FRAME_COUNT->OTG_FRAME_COUNT from frame count (read-only) */ + uint32_t otg_horz_count; /* OTG_STATUS_POSITION->OTG_HORZ_COUNT from horizontal position (read-only) */ + uint32_t otg_vert_count; /* OTG_STATUS_POSITION->OTG_VERT_COUNT from vertical position (read-only) */ + uint32_t otg_horz_count_hv; /* OTG_STATUS_HV_COUNT->OTG_HORZ_COUNT from horizontal count (read-only) */ + uint32_t otg_vert_count_nom; /* OTG_STATUS_HV_COUNT->OTG_VERT_COUNT_NOM from vertical count nominal (read-only) */ + uint32_t otg_flip_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_FLIP_PENDING from flip pending status (read-only) */ + uint32_t otg_dc_reg_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_DC_REG_UPDATE_PENDING from DC register update pending (read-only) */ + uint32_t otg_cursor_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_CURSOR_UPDATE_PENDING from cursor update pending (read-only) */ + uint32_t otg_vupdate_keepout_status; /* OTG_PIPE_UPDATE_STATUS->OTG_VUPDATE_KEEPOUT_STATUS from VUPDATE keepout status (read-only) */ + } optc[MAX_PIPES]; + + /* Metadata */ + uint32_t active_pipe_count; + uint32_t active_stream_count; + bool state_valid; +}; + +/** + * dc_capture_register_software_state() - Capture software state for register programming + * @dc: DC context containing current display configuration + * @state: Pointer to dc_register_software_state structure to populate + * + * Extracts all software state variables that are used to program hardware register + * fields across the display driver pipeline. This provides a complete snapshot + * of the software configuration that drives hardware register programming. 
+ * + * The function traverses the DC context and extracts values from: + * - Stream configurations (timing, format, DSC settings) + * - Plane states (surface format, rotation, scaling, cursor) + * - Pipe contexts (resource allocation, blending, viewport) + * - Clock manager (display clocks, DPP clocks, pixel clocks) + * - Resource context (DET buffer allocation, ODM configuration) + * + * This is essential for underflow debugging as it captures the exact software + * state that determines how registers are programmed, allowing analysis of + * whether underflow is caused by incorrect register programming or timing issues. + * + * Return: true if state was successfully captured, false on error + */ +bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state); + #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 5fa5e2b63fb7..40d7a7d83c40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -91,9 +91,17 @@ struct dc_vbios_funcs { struct device_id id); /* COMMANDS */ + enum bp_result (*select_crtc_source)( + struct dc_bios *bios, + struct bp_crtc_source_select *bp_params); enum bp_result (*encoder_control)( struct dc_bios *bios, struct bp_encoder_control *cntl); + enum bp_result (*dac_load_detection)( + struct dc_bios *bios, + enum engine_id engine_id, + enum dal_device_type device_type, + uint32_t enum_id); enum bp_result (*transmitter_control)( struct dc_bios *bios, struct bp_transmitter_control *cntl); @@ -165,6 +173,7 @@ struct dc_vbios_funcs { }; struct bios_registers { + uint32_t BIOS_SCRATCH_0; uint32_t BIOS_SCRATCH_3; uint32_t BIOS_SCRATCH_6; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 53a088ebddef..7b09af1cb306 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -442,7 +442,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru int i = 0, k = 0; int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it. 
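To make the intent of dc_capture_register_software_state() above concrete, here is a minimal sketch of how a driver-side debug path might consume it; the helper name, allocation, and logging choices are assumptions for illustration, not part of this patch.
/* Illustrative only, not part of this patch: snapshot the software state
 * declared above when investigating an underflow.  The structure spans
 * MAX_PIPES pipes, so it is heap-allocated rather than placed on the stack.
 */
static void example_dump_register_sw_state(struct dc *dc)
{
	struct dc_register_software_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return;

	if (dc_capture_register_software_state(dc, state) && state->state_valid)
		DC_LOG_DEBUG("sw-state snapshot: %u active pipes, %u active streams",
			     state->active_pipe_count, state->active_stream_count);

	kfree(state);
}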
uint8_t visual_confirm_enabled; - int pipe_idx = 0; struct dc_stream_status *stream_status = NULL; if (dc == NULL) @@ -457,7 +456,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled; if (should_manage_pstate) { - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->stream) @@ -472,7 +471,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us; break; } - pipe_idx++; } } @@ -872,7 +870,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, bool enable) { uint8_t cmd_pipe_index = 0; - uint32_t i, pipe_idx; + uint32_t i; uint8_t subvp_count = 0; union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2]; @@ -899,7 +897,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, if (enable) { // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); @@ -922,7 +920,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } - pipe_idx++; } if (subvp_count == 2) { update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes); @@ -1174,6 +1171,100 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index); } +void dc_dmub_srv_cursor_offload_init(struct dc *dc) +{ + struct dmub_rb_cmd_cursor_offload_init *init; + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + union dmub_rb_cmd cmd; + + if (!dc->config.enable_cursor_offload) + return; + + if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support) + return; + + if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr) + return; + + if (!dc_dmub_srv->dmub->cursor_offload_v1) + return; + + if (!dc_dmub_srv->dmub->shared_state) + return; + + memset(&cmd, 0, sizeof(cmd)); + + init = &cmd.cursor_offload_init; + init->header.type = DMUB_CMD__CURSOR_OFFLOAD; + init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT; + init->header.payload_bytes = sizeof(init->init_data); + init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr; + init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + dc_dmub_srv->cursor_offload_enabled = true; +} + +void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream, bool enable) +{ + struct pipe_ctx const *pipe_ctx; + struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl; + union dmub_rb_cmd cmd; + + if (!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + if (!stream) + return; + + pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); + if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream) + return; + + memset(&cmd, 0, sizeof(cmd)); + + cntl = &cmd.cursor_offload_stream_ctnl; + cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD; + 
cntl->header.sub_type = + enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE; + cntl->header.payload_bytes = sizeof(cntl->data); + + cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst; + cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull, + stream->timing.pix_clk_100hz / 10)); + + cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ? + stream->adjust.v_total_max : + stream->timing.v_total; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, + enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); +} + +void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe) +{ + struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl; + union dmub_rb_cmd cmd; + + if (!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + if (!pipe || !pipe->stream || !pipe->stream_res.tg) + return; + + memset(&cmd, 0, sizeof(cmd)); + + cntl = &cmd.cursor_offload_stream_ctnl; + cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD; + cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM; + cntl->header.payload_bytes = sizeof(cntl->data); + cntl->data.otg_inst = pipe->stream_res.tg->inst; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); +} + bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) { struct dc_context *dc_ctx; @@ -1993,6 +2084,9 @@ bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv) struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; bool result; + if (!dc_dmub_srv->dmub->feature_caps.lsdma_support_in_dmu) + return false; + memset(&cmd, 0, sizeof(cmd)); cmd.cmd_common.header.type = DMUB_CMD__LSDMA; @@ -2231,6 +2325,11 @@ bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uin return result; } +bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc) +{ + return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled; +} + void dc_dmub_srv_release_hw(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; @@ -2248,3 +2347,24 @@ void dc_dmub_srv_release_hw(const struct dc *dc) dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } + +void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return; + + dmub = dc_dmub_srv->dmub; + + if (dmub_srv_get_preos_info(dmub)) { + DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__); + DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version); + DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options); + DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status); + DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr); + DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size); + DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base); + DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 7ef93444ef3c..72e0a41f39f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -56,6 +56,7 @@ struct dc_dmub_srv { union dmub_shared_state_ips_driver_signals driver_signals; bool idle_allowed; bool needs_idle_wake; + bool cursor_offload_enabled; }; bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv); @@ -326,9 +327,51 @@ bool 
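A quick numerical check of the line_time_in_ns expression in dc_dmub_srv_control_cursor_offload() above: pix_clk_100hz / 10 is the pixel clock in kHz, so the expression reduces to 1 + h_total * 10^6 / pix_clk_khz. The figures below are an illustrative example, not values taken from this patch.
/* Illustrative check, not from this patch:
 * h_total = 2200, pix_clk_100hz = 1485000 (148.5 MHz) -> pix_clk_khz = 148500
 * line_time_in_ns = 1 + (2200 * 1000000) / 148500 = 1 + 14814 = 14815 ns
 */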
dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t enum ips_residency_mode ips_mode); /** + * dc_dmub_srv_cursor_offload_init() - Initializes cursor offload support in DMUB. + * + * @dc: pointer to DC object + */ +void dc_dmub_srv_cursor_offload_init(struct dc *dc); + +/** + * dc_dmub_srv_control_cursor_offload() - Enables or disables cursor offloading for a stream. + * + * @dc: pointer to DC object + * @context: the DC context to reference for pipe allocations + * @stream: the stream to control + * @enable: true to enable cursor offload, false to disable + */ +void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream, bool enable); + +/** + * dc_dmub_srv_program_cursor_now() - Requests immediate cursor programming for a given pipe. + * + * @dc: pointer to DC object + * @pipe: top-most pipe for a stream. + */ +void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe); + +/** + * dc_dmub_srv_is_cursor_offload_enabled() - Checks if cursor offload is enabled. + * + * @dc: pointer to DC object + * + * Return: true if cursor offload is enabled, false otherwise + */ +bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc); + +/** * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required. * * @dc - pointer to DC object */ void dc_dmub_srv_release_hw(const struct dc *dc); + +/** + * dc_dmub_srv_log_preos_dmcub_info() - Logs pre-OS DMCUB firmware info. + * + * @dc_dmub_srv - pointer to DC DMUB service object + */ +void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index db669ccb1d58..79e1696def63 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -1157,6 +1157,16 @@ struct dprx_states { bool cable_id_written; }; +union dpcd_panel_replay_capability_supported { + struct { + unsigned char PANEL_REPLAY_SUPPORT :1; + unsigned char SELECTIVE_UPDATE_SUPPORT :1; + unsigned char EARLY_TRANSPORT_SUPPORT :1; + unsigned char RESERVED :5; + } bits; + unsigned char raw; +}; + enum dpcd_downstream_port_max_bpc { DOWN_STREAM_MAX_8BPC = 0, DOWN_STREAM_MAX_10BPC, @@ -1280,6 +1290,7 @@ struct dpcd_caps { struct edp_psr_info psr_info; struct replay_info pr_info; + union dpcd_panel_replay_capability_supported pr_caps_supported; uint16_t edp_oled_emission_rate; union dp_receive_port0_cap receive_port0_cap; /* Indicates the number of SST links supported by MSO (Multi-Stream Output) */ @@ -1346,6 +1357,31 @@ union dpcd_replay_configuration { unsigned char raw; }; +union panel_replay_enable_and_configuration_1 { + struct { + unsigned char PANEL_REPLAY_ENABLE :1; + unsigned char PANEL_REPLAY_CRC_ENABLE :1; + unsigned char IRQ_HPD_ASSDP_MISSING :1; + unsigned char IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR :1; + unsigned char IRQ_HPD_RFB_ERROR :1; + unsigned char IRQ_HPD_ACTIVE_FRAME_CRC_ERROR :1; + unsigned char PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE :1; + unsigned char PANEL_REPLAY_EARLY_TRANSPORT_ENABLE :1; + } bits; + unsigned char raw; +}; + +union panel_replay_enable_and_configuration_2 { + struct { + unsigned char SINK_REFRESH_RATE_UNLOCK_GRANTED :1; + unsigned char RESERVED :1; + unsigned char SU_Y_GRANULARITY_EXT_VALUE_ENABLED :1; + unsigned char SU_Y_GRANULARITY_EXT_VALUE :4; + unsigned char SU_REGION_SCAN_LINE_CAPTURE_INDICATION :1; + } bits; + unsigned char raw; +}; + union
dpcd_alpm_configuration { struct { unsigned char ENABLE : 1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index 55704d4457ef..37d1a79e8241 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -147,6 +147,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->prefer_easf = false; else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2) spl_in->disable_easf = true; + else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 3) + spl_in->override_easf = true; /* Translate adaptive sharpening preference */ unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness; unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 76cf9fdedab0..321cfe92d799 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -473,12 +473,11 @@ void dc_enable_stereo( /* Triggers multi-stream synchronization. */ void dc_trigger_sync(struct dc *dc, struct dc_state *context); -enum surface_update_type dc_check_update_surfaces_for_stream( - struct dc *dc, +struct surface_update_descriptor dc_check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status); + struct dc_stream_update *stream_update); /** * Create a new default stream for the requested sink @@ -492,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); void dc_stream_release(struct dc_stream_state *dc_stream); -struct dc_stream_status *dc_stream_get_status( - struct dc_stream_state *dc_stream); +struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream); +const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream); /******************************************************************************* * Cursor interfaces - To manages the cursor within a stream diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index b5aa03a3e39c..f46039f64203 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -185,6 +185,10 @@ struct dc_panel_patch { unsigned int wait_after_dpcd_poweroff_ms; }; +/** + * struct dc_edid_caps - Capabilities read from EDID. + * @analog: Whether the monitor is analog. Used by DVI-I handling. 
+ */ struct dc_edid_caps { /* sink identification */ uint16_t manufacturer_id; @@ -212,6 +216,8 @@ struct dc_edid_caps { bool edid_hdmi; bool hdr_supported; bool rr_capable; + bool scdc_present; + bool analog; struct dc_panel_patch panel_patch; }; @@ -347,7 +353,8 @@ enum dc_connection_type { dc_connection_none, dc_connection_single, dc_connection_mst_branch, - dc_connection_sst_branch + dc_connection_sst_branch, + dc_connection_dac_load }; struct dc_csc_adjustments { @@ -934,6 +941,12 @@ enum dc_psr_version { DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF, }; +enum dc_replay_version { + DC_FREESYNC_REPLAY = 0, + DC_VESA_PANEL_REPLAY = 1, + DC_REPLAY_VERSION_UNSUPPORTED = 0XFF, +}; + /* Possible values of display_endpoint_id.endpoint */ enum display_endpoint_type { DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */ @@ -1086,6 +1099,7 @@ enum replay_FW_Message_type { Replay_Set_Residency_Frameupdate_Timer, Replay_Set_Pseudo_VTotal, Replay_Disabled_Adaptive_Sync_SDP, + Replay_Set_Version, Replay_Set_General_Cmd, }; @@ -1121,6 +1135,8 @@ union replay_low_refresh_rate_enable_options { }; struct replay_config { + /* Replay version */ + enum dc_replay_version replay_version; /* Replay feature is supported */ bool replay_supported; /* Replay caps support DPCD & EDID caps*/ @@ -1177,6 +1193,10 @@ struct replay_settings { uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; /* Defer Update Coasting vtotal table */ uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM]; + /* Skip frame number table */ + uint32_t frame_skip_number_table[PR_COASTING_TYPE_NUM]; + /* Defer skip frame number table */ + uint32_t defer_frame_skip_number_table[PR_COASTING_TYPE_NUM]; /* Maximum link off frame count */ uint32_t link_off_frame_count; /* Replay pseudo vtotal for low refresh rate*/ @@ -1185,6 +1205,8 @@ struct replay_settings { uint16_t last_pseudo_vtotal; /* Replay desync error */ uint32_t replay_desync_error_fail_count; + /* The frame skip number dal send to DMUB */ + uint16_t frame_skip_number; }; /* To split out "global" and "per-panel" config settings. 
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c index 5999b2da3a01..33d8bd91cb01 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c @@ -148,7 +148,7 @@ struct dccg *dccg2_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask) { - struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC); + struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL); struct dccg *base; if (dccg_dcn == NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h index a9b88f5e0c04..8bdffd9ff31b 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h @@ -425,7 +425,69 @@ struct dccg_mask { uint32_t SYMCLKD_CLOCK_ENABLE; \ uint32_t SYMCLKE_CLOCK_ENABLE; \ uint32_t DP_DTO_MODULO[MAX_PIPES]; \ - uint32_t DP_DTO_PHASE[MAX_PIPES] + uint32_t DP_DTO_PHASE[MAX_PIPES]; \ + uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \ + uint32_t DCCG_AUDIO_DTO0_MODULE; \ + uint32_t DCCG_AUDIO_DTO0_PHASE; \ + uint32_t DCCG_AUDIO_DTO1_MODULE; \ + uint32_t DCCG_AUDIO_DTO1_PHASE; \ + uint32_t DCCG_CAC_STATUS; \ + uint32_t DCCG_CAC_STATUS2; \ + uint32_t DCCG_DISP_CNTL_REG; \ + uint32_t DCCG_DS_CNTL; \ + uint32_t DCCG_DS_DTO_INCR; \ + uint32_t DCCG_DS_DTO_MODULO; \ + uint32_t DCCG_DS_HW_CAL_INTERVAL; \ + uint32_t DCCG_GTC_CNTL; \ + uint32_t DCCG_GTC_CURRENT; \ + uint32_t DCCG_GTC_DTO_INCR; \ + uint32_t DCCG_GTC_DTO_MODULO; \ + uint32_t DCCG_PERFMON_CNTL; \ + uint32_t DCCG_PERFMON_CNTL2; \ + uint32_t DCCG_SOFT_RESET; \ + uint32_t DCCG_TEST_CLK_SEL; \ + uint32_t DCCG_VSYNC_CNT_CTRL; \ + uint32_t DCCG_VSYNC_CNT_INT_CTRL; \ + uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \ + uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DP_DTO_DBUF_EN; \ + uint32_t DPIACLK_540M_DTO_MODULO; \ + uint32_t DPIACLK_540M_DTO_PHASE; \ + uint32_t DPIACLK_810M_DTO_MODULO; \ + uint32_t DPIACLK_810M_DTO_PHASE; \ + uint32_t DPIACLK_DTO_CNTL; \ + uint32_t DPIASYMCLK_CNTL; \ + uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DPREFCLK_CNTL; \ + uint32_t DTBCLK_DTO_DBUF_EN; \ + uint32_t FORCE_SYMCLK_DISABLE; \ + uint32_t HDMICHARCLK0_CLOCK_CNTL; \ + uint32_t MICROSECOND_TIME_BASE_DIV; \ + uint32_t MILLISECOND_TIME_BASE_DIV; \ + uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG0_PIXEL_RATE_CNTL; \ + uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG1_PIXEL_RATE_CNTL; \ + uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG2_PIXEL_RATE_CNTL; \ + uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG3_PIXEL_RATE_CNTL; \ + uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \ + uint32_t REFCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SYMCLK_PSP_CNTL + struct dccg_registers { DCCG_REG_VARIABLE_LIST; }; diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c index 8664f0c4c9b7..97df04b7e39d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c @@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg, OTG_DROP_PIXEL[otg_inst], 1); } +void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL); + dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO); + dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE); + dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE); + dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE); + dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE); + dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE); + dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE); + dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS); + dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2); + dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG); + dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL); + dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR); + dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO); + dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL); + dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL); + dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2); + dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3); + dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4); + dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5); + dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6); + dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL); + dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL); + dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT); + dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR); + dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO); + dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL); + dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2); + dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET); + dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL); + dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL); + dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL); + dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE); + dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL); + dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN); + dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]); + dccg_reg_state->dp_dto0_phase = 
REG_READ(DP_DTO_PHASE[0]); + dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]); + dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]); + dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]); + dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]); + dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]); + dccg_reg_state->dp_dto3_phase = REG_READ(DP_DTO_PHASE[3]); + dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO); + dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE); + dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO); + dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE); + dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL); + dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL); + dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL); + dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL); + dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]); + dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]); + dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]); + dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]); + dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL); + dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL); + dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL); + dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM); + dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM); + dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM); + dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM); + dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN); + dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]); + dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]); + dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]); + dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]); + dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]); + dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]); + dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]); + dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]); + dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL); + dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE); + dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL); + dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL); + dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM); + dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV); + dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV); + dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV); + dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL); + dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL); + dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg2_pixel_rate_cntl = 
REG_READ(OTG2_PIXEL_RATE_CNTL); + dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL); + dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL); + dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL); + dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL); + dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL); + dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL); + dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phypllc_pixclk_resync_cntl = REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL); + dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL); + dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL); + dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL); + dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE); + dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE); + dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE); + dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE); + dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE); +} + static const struct dccg_funcs dccg31_funcs = { .update_dpp_dto = dccg31_update_dpp_dto, .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, @@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = { .set_dispclk_change_mode = dccg31_set_dispclk_change_mode, .disable_dsc = dccg31_disable_dscclk, .enable_dsc = dccg31_enable_dscclk, + .dccg_read_reg_state = dccg31_read_reg_state, }; struct dccg *dccg31_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h index cd261051dc2c..bf659920d4cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h @@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst); void dccg31_enable_dscclk(struct dccg *dccg, int inst); +void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state); + #endif //__DCN31_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c index 8f6edd8e9beb..ef3db6beba25 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c @@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = { .get_pixel_rate_div = dccg314_get_pixel_rate_div, .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, - .set_dtbclk_p_src = dccg314_set_dtbclk_p_src + .set_dtbclk_p_src = dccg314_set_dtbclk_p_src, + .dccg_read_reg_state = dccg31_read_reg_state }; struct dccg *dccg314_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h 
b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h index 60ea1d248deb..a609635f35db 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h @@ -74,8 +74,7 @@ SR(DCCG_GATE_DISABLE_CNTL3),\ SR(HDMISTREAMCLK0_DTO_PARAM),\ SR(OTG_PIXEL_RATE_DIV),\ - SR(DTBCLK_P_CNTL),\ - SR(DCCG_AUDIO_DTO_SOURCE) + SR(DTBCLK_P_CNTL) #define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c index de6d62401362..bd2f528137b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c @@ -1114,6 +1114,16 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) if (dispclk_rdivider_value != 0) REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } +static void dccg35_wait_for_dentist_change_done( + struct dccg *dccg) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL); + + REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000); +} static void dcn35_set_dppclk_enable(struct dccg *dccg, uint32_t dpp_inst, uint32_t enable) @@ -1174,9 +1184,9 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, dcn35_set_dppclk_enable(dccg, dpp_inst, true); } else { dcn35_set_dppclk_enable(dccg, dpp_inst, false); - /*we have this in hwss: disable_plane*/ - //dccg35_set_dppclk_rcg(dccg, dpp_inst, true); + dccg35_set_dppclk_rcg(dccg, dpp_inst, true); } + udelay(10); dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } @@ -1300,6 +1310,8 @@ static void dccg35_set_pixel_rate_div( BREAK_TO_DEBUGGER(); return; } + if (otg_inst < 4) + dccg35_wait_for_dentist_change_done(dccg); } static void dccg35_set_dtbclk_p_src( @@ -1411,7 +1423,7 @@ static void dccg35_set_dtbclk_dto( __func__, params->otg_inst, params->pixclk_khz, params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo); - } else { + } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) { switch (params->otg_inst) { case 0: REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0); @@ -1664,7 +1676,7 @@ static void dccg35_dpp_root_clock_control( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - if (dccg->dpp_clock_gated[dpp_inst] == clock_on) + if (dccg->dpp_clock_gated[dpp_inst] != clock_on) return; if (clock_on) { @@ -1682,9 +1694,12 @@ static void dccg35_dpp_root_clock_control( DPPCLK0_DTO_PHASE, 0, DPPCLK0_DTO_MODULO, 1); /*we have this in hwss: disable_plane*/ - //dccg35_set_dppclk_rcg(dccg, dpp_inst, true); + dccg35_set_dppclk_rcg(dccg, dpp_inst, true); } + // wait for clock to fully ramp + udelay(10); + dccg->dpp_clock_gated[dpp_inst] = !clock_on; DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on); } @@ -2438,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = { .disable_symclk_se = dccg35_disable_symclk_se, .set_dtbclk_p_src = dccg35_set_dtbclk_p_src, .dccg_root_gate_disable_control = dccg35_root_gate_disable_control, + .dccg_read_reg_state = dccg31_read_reg_state, }; struct dccg *dccg35_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h index 51f98c5c51c4..7b9c36456cd9 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h +++ 
b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h @@ -41,8 +41,9 @@ SR(SYMCLKA_CLOCK_ENABLE),\ SR(SYMCLKB_CLOCK_ENABLE),\ SR(SYMCLKC_CLOCK_ENABLE),\ - SR(SYMCLKD_CLOCK_ENABLE),\ - SR(SYMCLKE_CLOCK_ENABLE) + SR(SYMCLKD_CLOCK_ENABLE), \ + SR(SYMCLKE_CLOCK_ENABLE),\ + SR(SYMCLK_PSP_CNTL) #define DCCG_MASK_SH_LIST_DCN35(mask_sh) \ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\ @@ -231,6 +232,14 @@ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\ struct dccg *dccg35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 0b8ed9b94d3c..663a18ee5162 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = { .enable_symclk_se = dccg401_enable_symclk_se, .disable_symclk_se = dccg401_disable_symclk_se, .set_dtbclk_p_src = dccg401_set_dtbclk_p_src, + .dccg_read_reg_state = dccg31_read_reg_state }; struct dccg *dccg401_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index a6006776333d..2dcf394edf22 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -283,7 +283,7 @@ struct abm *dce_abm_create( const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { - struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC); + struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); if (abm_dce == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index eeed840073fe..fcad61c618a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -1143,7 +1143,8 @@ void dce_aud_wall_dto_setup( REG_UPDATE(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase); - REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, + if (aud->masks->DCCG_AUDIO_DTO2_USE_512FBR_DTO) + REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index a8e79104b684..5f8fba45d98d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -1126,7 +1126,7 @@ struct dmcu *dcn10_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); @@ -1147,7 +1147,7 @@ struct dmcu 
*dcn20_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); @@ -1168,7 +1168,7 @@ struct dmcu *dcn21_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 0c50fe266c8a..87dbb8d7ed27 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -302,6 +302,10 @@ static void setup_panel_mode( if (ctx->dc->caps.psp_setup_panel_mode) return; + /* The code below is only applicable to encoders with a digital transmitter. */ + if (enc110->base.transmitter == TRANSMITTER_UNKNOWN) + return; + ASSERT(REG(DP_DPHY_INTERNAL_CTRL)); value = REG_READ(DP_DPHY_INTERNAL_CTRL); @@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output( return true; } +static bool dce110_link_encoder_validate_rgb_output( + const struct dce110_link_encoder *enc110, + const struct dc_crtc_timing *crtc_timing) +{ + /* When the VBIOS doesn't specify any limits, use 400 MHz. + * The value comes from amdgpu_atombios_get_clock_info. + */ + uint32_t max_pixel_clock_khz = 400000; + + if (enc110->base.ctx->dc_bios->fw_info_valid && + enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) { + max_pixel_clock_khz = + enc110->base.ctx->dc_bios->fw_info.max_pixel_clock; + } + + if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10) + return false; + + if (crtc_timing->display_color_depth != COLOR_DEPTH_888) + return false; + + if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB) + return false; + + return true; +} + void dce110_link_encoder_construct( struct dce110_link_encoder *enc110, const struct encoder_init_data *init_data, @@ -824,6 +855,7 @@ void dce110_link_encoder_construct( enc110->base.connector = init_data->connector; enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; + enc110->base.analog_engine = init_data->analog_engine; enc110->base.features = *enc_features; @@ -847,6 +879,11 @@ void dce110_link_encoder_construct( SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; + if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII || + enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) && + enc110->base.analog_engine != ENGINE_ID_UNKNOWN) + enc110->base.output_signals |= SIGNAL_TYPE_RGB; + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer @@ -885,6 +922,13 @@ void dce110_link_encoder_construct( enc110->base.preferred_engine = ENGINE_ID_DIGG; break; default: + if (init_data->analog_engine != ENGINE_ID_UNKNOWN) { + /* The connector is analog-only, ie. 
VGA */ + enc110->base.preferred_engine = init_data->analog_engine; + enc110->base.output_signals = SIGNAL_TYPE_RGB; + enc110->base.transmitter = TRANSMITTER_UNKNOWN; + break; + } ASSERT_CRITICAL(false); enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; } @@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream( is_valid = dce110_link_encoder_validate_dp_output( enc110, &stream->timing); break; + case SIGNAL_TYPE_RGB: + is_valid = dce110_link_encoder_validate_rgb_output( + enc110, &stream->timing); + break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_LVDS: is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB; @@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init( cntl.coherent = false; cntl.hpd_sel = enc110->base.hpd_source; + /* The code below is only applicable to encoders with a digital transmitter. */ + if (enc110->base.transmitter == TRANSMITTER_UNKNOWN) + return; + if (enc110->base.connector.id == CONNECTOR_ID_EDP) cntl.signal = SIGNAL_TYPE_EDP; @@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup( /* DP MST */ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5); break; + case SIGNAL_TYPE_RGB: + break; default: ASSERT_CRITICAL(false); /* invalid mode ! */ @@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; + switch (enc->analog_engine) { + case ENGINE_ID_DACA: + REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0); + break; + case ENGINE_ID_DACB: + /* DACB doesn't seem to be present on DCE6+, + * although there are references to it in the register file. + */ + DC_LOG_ERROR("%s DACB is unsupported\n", __func__); + break; + default: + break; + } + + /* The code below only applies to connectors that support digital signals. */ + if (enc->transmitter == TRANSMITTER_UNKNOWN) + return; + if (!dce110_is_dig_enabled(enc)) { /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ return; @@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct( enc110->base.connector = init_data->connector; enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; + enc110->base.analog_engine = init_data->analog_engine; enc110->base.features = *enc_features; @@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct( SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; + if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII || + enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) && + enc110->base.analog_engine != ENGINE_ID_UNKNOWN) + enc110->base.output_signals |= SIGNAL_TYPE_RGB; + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer @@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct( enc110->base.preferred_engine = ENGINE_ID_DIGG; break; default: + if (init_data->analog_engine != ENGINE_ID_UNKNOWN) { + /* The connector is analog-only, ie. 
VGA */ + enc110->base.preferred_engine = init_data->analog_engine; + enc110->base.output_signals = SIGNAL_TYPE_RGB; + enc110->base.transmitter = TRANSMITTER_UNKNOWN; + break; + } ASSERT_CRITICAL(false); enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 261c70e01e33..c58b69bc319b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -101,18 +101,21 @@ SRI(DP_SEC_CNTL, DP, id), \ SRI(DP_VID_STREAM_CNTL, DP, id), \ SRI(DP_DPHY_FAST_TRAINING, DP, id), \ - SRI(DP_SEC_CNTL1, DP, id) + SRI(DP_SEC_CNTL1, DP, id), \ + SR(DAC_ENABLE) #endif #define LE_DCE80_REG_LIST(id)\ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ - LE_COMMON_REG_LIST_BASE(id) + LE_COMMON_REG_LIST_BASE(id), \ + SR(DAC_ENABLE) #define LE_DCE100_REG_LIST(id)\ LE_COMMON_REG_LIST_BASE(id), \ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ - SR(DCI_MEM_PWR_STATUS) + SR(DCI_MEM_PWR_STATUS), \ + SR(DAC_ENABLE) #define LE_DCE110_REG_LIST(id)\ LE_COMMON_REG_LIST_BASE(id), \ @@ -181,6 +184,9 @@ struct dce110_link_enc_registers { uint32_t DP_DPHY_BS_SR_SWAP_CNTL; uint32_t DP_DPHY_HBR2_PATTERN_CONTROL; uint32_t DP_SEC_CNTL1; + + /* DAC registers */ + uint32_t DAC_ENABLE; }; struct dce110_link_encoder { @@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output( enum signal_type signal, const struct dc_crtc_timing *crtc_timing); -bool dce110_link_encoder_validate_rgb_output( - const struct dce110_link_encoder *enc110, - const struct dc_crtc_timing *crtc_timing); - bool dce110_link_encoder_validate_dp_output( const struct dce110_link_encoder *enc110, const struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 1130d7619b26..574618d5d4a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -1567,3 +1567,17 @@ void dce110_stream_encoder_construct( enc110->se_shift = se_shift; enc110->se_mask = se_mask; } + +static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {}; + +void dce110_analog_stream_encoder_construct( + struct dce110_stream_encoder *enc110, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id) +{ + enc110->base.funcs = &dce110_an_str_enc_funcs; + enc110->base.ctx = ctx; + enc110->base.id = eng_id; + enc110->base.bp = bp; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h index cc5020a8e1e1..068de1392121 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h @@ -708,6 +708,11 @@ void dce110_stream_encoder_construct( const struct dce_stream_encoder_shift *se_shift, const struct dce_stream_encoder_mask *se_mask); +void dce110_analog_stream_encoder_construct( + struct dce110_stream_encoder *enc110, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id); void dce110_se_audio_mute_control( struct stream_encoder *enc, bool mute); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index d37ecfdde4f1..5bfa2b0d2afd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -61,10 
+61,9 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, dc_dmub_srv_wait_for_inbox0_ack(dmub_srv); } -bool should_use_dmub_lock(struct dc_link *link) +bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link) { - /* ASIC doesn't support DMUB */ - if (!link->ctx->dmub_srv) + if (!link) return false; if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) @@ -73,16 +72,38 @@ bool should_use_dmub_lock(struct dc_link *link) if (link->replay_settings.replay_feature_enabled) return true; - /* only use HW lock for PSR1 on single eDP */ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) { struct dc_link *edp_links[MAX_NUM_EDP]; int edp_num; - dc_get_edp_links(link->dc, edp_links, &edp_num); - + dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num == 1) return true; } + return false; +} +bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context) +{ + if (!context) + return false; + for (int i = 0; i < context->stream_count; i++) { + const struct dc_link *link = context->streams[i]->link; + + if (dmub_hw_lock_mgr_does_link_require_lock(dc, link)) + return true; + } return false; } + +bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link) +{ + /* ASIC doesn't support DMUB */ + if (!dc->ctx->dmub_srv) + return false; + + if (dc->ctx->dce_version >= DCN_VERSION_4_01) + return false; + + return dmub_hw_lock_mgr_does_link_require_lock(dc, link); +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h index 5a72b168fb4a..4c80ca8484ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h @@ -37,6 +37,16 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_cmd_lock_hw hw_lock_cmd); -bool should_use_dmub_lock(struct dc_link *link); +/** + * should_use_dmub_inbox1_lock() - Checks if the DMCUB hardware lock via inbox1 should be used. 
+ * + * @dc: pointer to DC object + * @link: optional pointer to the link object to check for enabled link features + * + * Return: true if the inbox1 lock should be used, false otherwise + */ +bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link); +bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link); +bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context); #endif /*_DMUB_HW_LOCK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index f9542edff14b..cf1372aaff6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -213,7 +213,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, */ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, uint32_t coasting_vtotal, - uint8_t panel_inst) + uint8_t panel_inst, + uint16_t frame_skip_number) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; @@ -227,6 +228,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16; + pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number; dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -283,7 +285,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, * Set REPLAY power optimization flags and coasting vtotal. */ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub, - unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal) + unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal, uint16_t frame_skip_number) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; @@ -301,6 +303,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm pCmd->replay_set_power_opt_data.panel_inst = panel_inst; pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16; + pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number; dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -384,6 +387,19 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled = cmd_element->disabled_adaptive_sync_sdp_data.force_disabled; break; + case Replay_Set_Version: + //Header + cmd.replay_set_version.header.sub_type = + DMUB_CMD__REPLAY_SET_VERSION; + cmd.replay_set_version.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_version) - + sizeof(struct dmub_cmd_header); + //Cmd Body + cmd.replay_set_version.replay_set_version_data.panel_inst = + cmd_element->version_data.panel_inst; + cmd.replay_set_version.replay_set_version_data.version = + cmd_element->version_data.version; + break; case Replay_Set_General_Cmd: //Header cmd.replay_set_general_cmd.header.sub_type = diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index e6346c0ffc0e..07c79739a980 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -27,11 +27,12 @@ struct dmub_replay_funcs { void (*replay_send_cmd)(struct dmub_replay *dmub, enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element); void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint32_t coasting_vtotal, - uint8_t panel_inst); + uint8_t panel_inst, uint16_t frame_skip_number); void (*replay_residency)(struct dmub_replay *dmub, uint8_t panel_inst, uint32_t *residency, const bool is_start, const enum pr_residency_mode mode); void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub, - unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal); + unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal, + uint16_t frame_skip_number); }; struct dmub_replay *dmub_replay_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile deleted file mode 100644 index 4c21ce42054c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ /dev/null @@ -1,141 +0,0 @@ -# SPDX-License-Identifier: MIT */ -# -# Copyright 2023 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# Authors: AMD -# -# Makefile for dml2. 
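Before the dml2 Makefile removal below, a note on the dmub_replay hunks above: the 32-bit coasting vtotal is still split across two 16-bit fields of the DMUB payload, and the new frame_skip_number now rides along in the same command, which is why both replay_set_coasting_vtotal() and replay_set_power_opt_and_coasting_vtotal() grow the extra parameter. A small, self-contained illustration of that packing follows; the struct and pack_coasting_vtotal() are invented for demonstration, with comments naming the real payload members they mirror.

#include <stdint.h>
#include <stdio.h>

struct coasting_vtotal_fields {
	uint16_t low;			/* replay_set_coasting_vtotal_data.coasting_vtotal */
	uint16_t high;			/* replay_set_coasting_vtotal_data.coasting_vtotal_high */
	uint16_t frame_skip_number;	/* replay_set_coasting_vtotal_data.frame_skip_number */
};

/* Mirrors the masking/shifting done in the hunks above. */
static struct coasting_vtotal_fields pack_coasting_vtotal(uint32_t coasting_vtotal,
							   uint16_t frame_skip_number)
{
	struct coasting_vtotal_fields f = {
		.low = coasting_vtotal & 0xFFFF,
		.high = (coasting_vtotal & 0xFFFF0000) >> 16,
		.frame_skip_number = frame_skip_number,
	};

	return f;
}

int main(void)
{
	struct coasting_vtotal_fields f = pack_coasting_vtotal(0x0001F040, 2);

	printf("low=0x%04x high=0x%04x skip=%u\n",
	       (unsigned)f.low, (unsigned)f.high, (unsigned)f.frame_skip_number);
	return 0;
}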
- -dml2_ccflags := $(CC_FLAGS_FPU) -dml2_rcflags := $(CC_FLAGS_NO_FPU) - -ifneq ($(CONFIG_FRAME_WARN),0) - ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) - ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy) - frame_warn_limit := 4096 - else - frame_warn_limit := 3072 - endif - else - frame_warn_limit := 2048 - endif - - ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y) - frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit) - endif -endif - -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2 -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/ - -CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags) - -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags) - -DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \ - dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \ - dml_display_rq_dlg_calc.o - -AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DML2) - -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) 
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags) - -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags) - -DML21 := src/dml2_top/dml2_top_interfaces.o -DML21 += src/dml2_top/dml2_top_soc15.o -DML21 += src/dml2_core/dml2_core_dcn4.o -DML21 += src/dml2_core/dml2_core_utils.o -DML21 += src/dml2_core/dml2_core_factory.o -DML21 += src/dml2_core/dml2_core_dcn4_calcs.o -DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o -DML21 += src/dml2_dpmm/dml2_dpmm_factory.o -DML21 += src/dml2_mcg/dml2_mcg_dcn4.o -DML21 += src/dml2_mcg/dml2_mcg_factory.o -DML21 += src/dml2_pmo/dml2_pmo_dcn3.o -DML21 += src/dml2_pmo/dml2_pmo_factory.o -DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o -DML21 += src/dml2_standalone_libraries/lib_float_math.o -DML21 += dml21_translation_helper.o -DML21 += dml21_wrapper.o -DML21 += dml21_utils.o - -AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DML21) - diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile new file mode 100644 index 000000000000..97e068b6bf6b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: MIT */ +# +# Copyright 2023 Advanced Micro Devices, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Authors: AMD +# +# Makefile for dml2. + +dml2_ccflags := $(CC_FLAGS_FPU) +dml2_rcflags := $(CC_FLAGS_NO_FPU) + +ifneq ($(CONFIG_FRAME_WARN),0) + ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) + ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy) + frame_warn_limit := 4096 + else + frame_warn_limit := 3072 + endif + else + frame_warn_limit := 2056 + endif + + ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y) + frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit) + endif +endif + +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0 +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/ + +CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags) + +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags) 
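The frame_warn_limit selection carried over into the new dml2_0 Makefile raises the per-function stack-frame warning threshold for the DML objects, and raises it further for KASAN/KCSAN or clang compile-test builds, because the mode-support math keeps unusually large locals. A toy, userspace-compilable illustration of the diagnostic being tuned; sum_scaled() and the 512-entry scratch array are invented, the limit values come from the Makefile above.

#include <stdio.h>

/* Locals alone exceed a 2048-byte -Wframe-larger-than limit (512 doubles
 * is 4 KiB of stack), so building this with the smaller limit emits the
 * warning that the per-object frame_warn_flag overrides. */
static double sum_scaled(const double *in, int n)
{
	double scratch[512];
	double total = 0.0;
	int i;

	for (i = 0; i < n && i < 512; i++) {
		scratch[i] = in[i] * 2.0;
		total += scratch[i];
	}
	return total;
}

int main(void)
{
	double samples[4] = { 1.0, 2.0, 3.0, 4.0 };

	printf("%f\n", sum_scaled(samples, 4));
	return 0;
}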
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags) + +DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \ + dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \ + dml_display_rq_dlg_calc.o + +AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DML2) + +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags) + +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags) + +DML21 := 
src/dml2_top/dml2_top_interfaces.o +DML21 += src/dml2_top/dml2_top_soc15.o +DML21 += src/dml2_core/dml2_core_dcn4.o +DML21 += src/dml2_core/dml2_core_utils.o +DML21 += src/dml2_core/dml2_core_factory.o +DML21 += src/dml2_core/dml2_core_dcn4_calcs.o +DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o +DML21 += src/dml2_dpmm/dml2_dpmm_factory.o +DML21 += src/dml2_mcg/dml2_mcg_dcn4.o +DML21 += src/dml2_mcg/dml2_mcg_factory.o +DML21 += src/dml2_pmo/dml2_pmo_dcn3.o +DML21 += src/dml2_pmo/dml2_pmo_factory.o +DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o +DML21 += src/dml2_standalone_libraries/lib_float_math.o +DML21 += dml21_translation_helper.o +DML21 += dml21_wrapper.o +DML21 += dml21_utils.o + +AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DML21) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h index e450445bc05d..b954c9648fbe 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h @@ -53,17 +53,17 @@ typedef const void *const_pvoid; typedef const char *const_pchar; typedef struct rgba_struct { - uint8 a; - uint8 r; - uint8 g; - uint8 b; + uint8 a; + uint8 r; + uint8 g; + uint8 b; } rgba_t; typedef struct { - uint8 blue; - uint8 green; - uint8 red; - uint8 alpha; + uint8 blue; + uint8 green; + uint8 red; + uint8 alpha; } gen_color_t; typedef union { @@ -87,7 +87,7 @@ typedef union { } uintfloat64; #ifndef UNREFERENCED_PARAMETER -#define UNREFERENCED_PARAMETER(x) x = x +#define UNREFERENCED_PARAMETER(x) (x = x) #endif #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c index 4b9b2e84d381..c468f492b876 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c @@ -10205,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe); } + #define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \ { \ dml_uint_t plane_idx; \ @@ -10333,3 +10334,4 @@ dml_get_per_surface_var_func(bigk_fragment_size, dml_uint_t, mode_lib->mp.BIGK_F dml_get_per_surface_var_func(dpte_bytes_per_row, dml_uint_t, mode_lib->mp.PixelPTEBytesPerRow); dml_get_per_surface_var_func(meta_bytes_per_row, dml_uint_t, mode_lib->mp.MetaRowByte); dml_get_per_surface_var_func(det_buffer_size_kbytes, dml_uint_t, mode_lib->ms.DETBufferSizeInKByte); + diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h index a38ed89c47a9..a38ed89c47a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h index dbeb08466092..5b40dcdc4406 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h @@ -274,7 +274,6 @@ enum dml_clk_cfg_policy { dml_use_state_freq = 2 }; - struct soc_state_bounding_box_st { dml_float_t socclk_mhz; dml_float_t dscclk_mhz; @@ -1894,7 +1893,7 @@ struct display_mode_lib_scratch_st { 
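The cmntypes.h hunk above wraps the UNREFERENCED_PARAMETER expansion in parentheses; without them the `x = x` body can splice into a surrounding expression and silently modify the argument. A self-contained demonstration of the hazard follows; UNREF_BARE/UNREF_PAREN are stand-in names, not the real macro.

#include <stdio.h>

#define UNREF_BARE(x)   x = x
#define UNREF_PAREN(x)  (x = x)

int main(void)
{
	int x = 5, r;

	/* expands to r = x = x + 1: assignment is right-associative, so x becomes 6 */
	r = UNREF_BARE(x) + 1;
	printf("bare:  x=%d r=%d\n", x, r);

	x = 5;
	/* expands to r = (x = x) + 1: x is left untouched, r is 6 */
	r = UNREF_PAREN(x) + 1;
	printf("paren: x=%d r=%d\n", x, r);
	return 0;
}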
struct CalculatePrefetchSchedule_params_st CalculatePrefetchSchedule_params; }; -/// @brief Represent the overall soc/ip enviroment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output +/// @brief Represent the overall soc/ip environment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output struct display_mode_lib_st { dml_uint_t project; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h index 14d389525296..e574c81edf5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h @@ -52,7 +52,7 @@ #define __DML_VBA_DEBUG__ #define __DML_VBA_ENABLE_INLINE_CHECK_ 0 #define __DML_VBA_MIN_VSTARTUP__ 9 //<brief At which vstartup the DML start to try if the mode can be supported -#define __DML_ARB_TO_RET_DELAY__ 7 + 95 //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET) +#define __DML_ARB_TO_RET_DELAY__ (7 + 95) //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET) #define __DML_MIN_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation #define __DML_MAX_VRATIO_PRE__ 4.0 //<brief Prefetch schedule max vratio #define __DML_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c index 89890c88fd66..89890c88fd66 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h index 113b0265e1d1..a82b49cf7fb0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h @@ -30,7 +30,6 @@ #include "display_mode_core_structs.h" #include "cmntypes.h" - #include "dml_assert.h" #include "dml_logging.h" @@ -72,5 +71,4 @@ __DML_DLL_EXPORT__ dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st __DML_DLL_EXPORT__ dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx); __DML_DLL_EXPORT__ void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c index bf5e7f4e0416..bf5e7f4e0416 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h index 9880d3e0398e..9880d3e0398e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c index ee721606b883..ee721606b883 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h index 4bff52eaaef8..4bff52eaaef8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c index 08f7f03b1023..798abb2b2e67 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c @@ -224,9 +224,7 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); /* Populate stream, plane mappings and other fields in display config. */ - DC_FP_START(); result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); - DC_FP_END(); if (!result) return false; @@ -281,9 +279,7 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); mode_support->dml2_instance = dml_init->dml2_instance; - DC_FP_START(); dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); - DC_FP_END(); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h index 15f92029d2e5..15f92029d2e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h index 793e1c038efd..16a4f97bca4e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
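The dml21_wrapper.c hunks above drop the kernel-FPU guards around the integer-only state-mapping step, leaving DC_FP_START()/DC_FP_END() only around the DML2 entry points that actually do floating-point math (dml2_check_mode_supported stays guarded in the context shown). A stand-alone sketch of that discipline, with fpu_begin()/fpu_end() as invented stand-ins for the real guards and the two worker functions made up for illustration.

#include <stdbool.h>
#include <stdio.h>

static void fpu_begin(void) { puts("enter FPU section"); }	/* DC_FP_START() in the driver */
static void fpu_end(void)   { puts("leave FPU section"); }	/* DC_FP_END() in the driver */

/* Integer-only bookkeeping: safe to run outside the guards. */
static bool map_state(int stream_count) { return stream_count > 0; }

/* Floating-point math: must stay inside the guards in kernel context. */
static bool check_mode(double pixel_clock_khz) { return pixel_clock_khz > 0.0; }

static bool mode_check(int stream_count, double pixel_clock_khz)
{
	bool supported;

	if (!map_state(stream_count))	/* no FPU guard needed here any more */
		return false;

	fpu_begin();
	supported = check_mode(pixel_clock_khz);
	fpu_end();

	return supported;
}

int main(void)
{
	printf("supported: %d\n", mode_check(2, 594000.0));
	return 0;
}

Keeping the guarded region as small as possible matters because kernel FPU sections disable preemption on most architectures.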
- #ifndef __DML_DML_DCN4_SOC_BB__ #define __DML_DML_DCN4_SOC_BB__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h index 281d7ad230d8..281d7ad230d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h index a64ec4dcf11a..a64ec4dcf11a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h index 91955bbe24b8..bf57df42d1d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h @@ -46,7 +46,6 @@ struct dml2_display_dlg_regs { uint32_t dst_y_delta_drq_limit; uint32_t refcyc_per_vm_dmdata; uint32_t dmdata_dl_delta; - uint32_t dst_y_svp_drq_limit; // MRQ uint32_t refcyc_per_meta_chunk_vblank_l; @@ -122,6 +121,8 @@ struct dml2_display_rq_regs { uint32_t crq_expansion_mode; uint32_t plane1_base_address; uint32_t unbounded_request_enabled; + bool pte_buffer_mode; + bool force_one_row_for_frame; // MRQ uint32_t mrq_expansion_mode; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h index e8dc6471c0be..35aa954248cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h @@ -49,6 +49,11 @@ enum dml2_source_format_class { dml2_422_packed_12 = 18 }; +enum dml2_sample_positioning { + dml2_interstitial = 0, + dml2_cosited = 1 +}; + enum dml2_rotation_angle { dml2_rotation_0 = 0, dml2_rotation_90 = 1, @@ -82,6 +87,15 @@ enum dml2_output_link_dp_rate { dml2_dp_rate_uhbr20 = 6 }; +enum dml2_pstate_type { + dml2_pstate_type_uclk = 0, + dml2_pstate_type_fclk = 1, + dml2_pstate_type_ppt = 2, + dml2_pstate_type_temp_read = 3, + dml2_pstate_type_dummy_pstate = 4, + dml2_pstate_type_count = 5 +}; + enum dml2_uclk_pstate_change_strategy { dml2_uclk_pstate_change_strategy_auto = 0, dml2_uclk_pstate_change_strategy_force_vactive = 1, @@ -222,7 +236,11 @@ struct dml2_composition_cfg { struct { bool enabled; + bool easf_enabled; + bool isharp_enabled; bool upsp_enabled; + enum dml2_sample_positioning upsp_sample_positioning; + unsigned int upsp_vtaps; struct { double h_ratio; double v_ratio; @@ -384,7 +402,7 @@ struct dml2_plane_parameters { // reserved_vblank_time_ns is the minimum time to reserve in vblank for Twait // The actual reserved vblank time used for the corresponding stream in mode_programming would be at least as much as this per-plane override. 
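The new dml2_pstate_type enumeration above ends with a _count entry so that per-p-state-type arrays (as the max_vactive_det_fill_delay_us change in the next hunk uses) can be sized and iterated uniformly across the uclk, fclk, ppt, temp-read and dummy domains. A small self-contained illustration of that idiom; worst_case_delay_us() and the sample values are invented.

#include <stdio.h>

enum dml2_pstate_type {
	dml2_pstate_type_uclk = 0,
	dml2_pstate_type_fclk = 1,
	dml2_pstate_type_ppt = 2,
	dml2_pstate_type_temp_read = 3,
	dml2_pstate_type_dummy_pstate = 4,
	dml2_pstate_type_count = 5
};

/* The trailing _count enumerator sizes the array and bounds the loop. */
static unsigned int worst_case_delay_us(const unsigned int per_type_delay_us[dml2_pstate_type_count])
{
	unsigned int worst = 0;
	int i;

	for (i = 0; i < dml2_pstate_type_count; i++)
		if (per_type_delay_us[i] > worst)
			worst = per_type_delay_us[i];
	return worst;
}

int main(void)
{
	unsigned int delay_us[dml2_pstate_type_count] = { 30, 10, 5, 5, 0 };

	printf("worst p-state delay: %u us\n", worst_case_delay_us(delay_us));
	return 0;
}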
long reserved_vblank_time_ns; - unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay + unsigned int max_vactive_det_fill_delay_us[dml2_pstate_type_count]; // 0 = no reserved time, +ve = explicit max delay unsigned int gpuvm_min_page_size_kbytes; unsigned int hostvm_min_page_size_kbytes; @@ -413,7 +431,6 @@ struct dml2_stream_parameters { bool disable_dynamic_odm; bool disable_subvp; int minimum_vblank_idle_requirement_us; - bool minimize_active_latency_hiding; struct { struct { @@ -456,6 +473,7 @@ struct dml2_display_cfg { bool enable; bool value; } force_nom_det_size_kbytes; + bool mode_support_check_disable; bool mcache_admissibility_check_disable; bool surface_viewport_size_check_disable; @@ -478,7 +496,6 @@ struct dml2_display_cfg { bool synchronize_ddr_displays_for_uclk_pstate_change; bool max_outstanding_when_urgent_expected_disable; bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming - unsigned int best_effort_min_active_latency_hiding_us; bool all_streams_blanked; } overrides; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h index 8f624a912e78..8f624a912e78 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h index 176f55947664..1fbc520c2540 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h @@ -89,8 +89,8 @@ struct dml2_soc_qos_parameters { struct dml2_soc_power_management_parameters { double dram_clk_change_blackout_us; - double dram_clk_change_read_only_us; - double dram_clk_change_write_only_us; + double dram_clk_change_read_only_us; // deprecated + double dram_clk_change_write_only_us; // deprecated double fclk_change_blackout_us; double g7_ppt_blackout_us; double g7_temperature_read_blackout_us; @@ -145,6 +145,8 @@ struct dml2_soc_bb { struct dml2_soc_vmin_clock_limits vmin_limit; double lower_bound_bandwidth_dchub; + double fraction_of_urgent_bandwidth_nominal_target; + double fraction_of_urgent_bandwidth_flip_target; unsigned int dprefclk_mhz; unsigned int xtalclk_mhz; unsigned int pcie_refclk_mhz; @@ -170,6 +172,7 @@ struct dml2_soc_bb { struct dml2_ip_capabilities { unsigned int pipe_count; unsigned int otg_count; + unsigned int TDLUT_33cube_count; unsigned int num_dsc; unsigned int max_num_dp2p0_streams; unsigned int max_num_hdmi_frl_outputs; @@ -188,7 +191,9 @@ struct dml2_ip_capabilities { unsigned int subvp_prefetch_end_to_mall_start_us; unsigned int subvp_fw_processing_delay; unsigned int max_vactive_det_fill_delay_us; - + unsigned int ppt_max_allow_delay_us; + unsigned int temp_read_max_allow_delay_us; + unsigned int dummy_pstate_max_allow_delay_us; /* FAMS2 delays */ struct { unsigned int max_allow_delay_us; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h index 41adb1104d0f..452e4a2e72c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h @@ -70,6 +70,8 @@ struct dml2_pmo_options { 
bool disable_dyn_odm; bool disable_dyn_odm_for_multi_stream; bool disable_dyn_odm_for_stream_with_svp; + struct dml2_pmo_pstate_strategy *override_strategy_lists[DML2_MAX_PLANES]; + unsigned int num_override_strategies_per_list[DML2_MAX_PLANES]; }; struct dml2_options { @@ -310,6 +312,7 @@ struct dml2_mode_support_info { bool NumberOfOTGSupport; bool NumberOfHDMIFRLSupport; bool NumberOfDP2p0Support; + bool NumberOfTDLUT33cubeSupport; bool WritebackScaleRatioAndTapsSupport; bool CursorSupport; bool PitchSupport; @@ -357,6 +360,8 @@ struct dml2_mode_support_info { unsigned int AlignedCPitch[DML2_MAX_PLANES]; bool g6_temp_read_support; bool temp_read_or_ppt_support; + bool qos_bandwidth_support; + bool dcfclk_support; }; // dml2_mode_support_info struct dml2_display_cfg_programming { @@ -671,6 +676,8 @@ struct dml2_display_cfg_programming { unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY bool ROBUrgencyAvoidance; double LowestPrefetchMargin; + + unsigned int pstate_recout_reduction_lines[DML2_MAX_PLANES]; } misc; struct dml2_mode_support_info mode_support_info; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c index 6ee37386f672..eba948e187c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c @@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = { .writeback_interface_buffer_size_kbytes = 90, //Number of pipes after DCN Pipe harvesting .max_num_dpp = 4, + .max_num_opp = 4, .max_num_otg = 4, .max_num_wb = 1, .max_dchub_pscl_bw_pix_per_clk = 4, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h index a68bb001a346..a68bb001a346 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index bf62d42b3f78..a02e9fd6b5ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -1303,6 +1303,7 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 16; } else { + if (Output == dml2_hdmi || Output == dml2_hdmifrl) { NonDSCBPP0 = 24; NonDSCBPP1 = 24; @@ -1320,6 +1321,7 @@ static double TruncToValidBPP( MaxDSCBPP = 16; } } + if (Output == dml2_dp2p0) { MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0; } else if (DSCEnable && Output == dml2_dp) { @@ -4047,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode, bool UseDSC, unsigned int NumberOfDSCSlices, unsigned int TotalNumberOfActiveDPP, + unsigned int TotalNumberOfActiveOPP, unsigned int MaxNumDPP, + unsigned int MaxNumOPP, double DISPCLKRequired, unsigned int NumberOfDPPRequired, unsigned int MaxHActiveForDSC, @@ -4063,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode, if (DISPCLKRequired > MaxDispclk) return false; - if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP) + if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > 
MaxNumOPP) return false; if (are_odm_segments_symmetrical) { if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle)) @@ -4109,7 +4113,9 @@ static noinline_for_stack void CalculateODMMode( double MaxDispclk, bool DSCEnable, unsigned int TotalNumberOfActiveDPP, + unsigned int TotalNumberOfActiveOPP, unsigned int MaxNumDPP, + unsigned int MaxNumOPP, double PixelClock, unsigned int NumberOfDSCSlices, @@ -4179,7 +4185,9 @@ static noinline_for_stack void CalculateODMMode( UseDSC, NumberOfDSCSlices, TotalNumberOfActiveDPP, + TotalNumberOfActiveOPP, MaxNumDPP, + MaxNumOPP, DISPCLKRequired, NumberOfDPPRequired, MaxHActiveForDSC, @@ -6964,7 +6972,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency( stream_index = p->display_cfg->plane_descriptors[plane_index].stream_index; - dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us / + dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us[0] / ((double)p->display_cfg->stream_descriptors[stream_index].timing.h_total / (double)p->display_cfg->stream_descriptors[stream_index].timing.pixel_clock_khz * 1000.0)); @@ -7061,9 +7069,9 @@ static void calculate_excess_vactive_bandwidth_required( excess_vactive_fill_bw_l[plane_index] = 0.0; excess_vactive_fill_bw_c[plane_index] = 0.0; - if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us > 0) { - excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us; - excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us; + if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] > 0) { + excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk]; + excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk]; } } } @@ -8358,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params); mode_lib->ms.TotalNumberOfActiveDPP = 0; + mode_lib->ms.TotalNumberOfActiveOPP = 0; mode_lib->ms.support.TotalAvailablePipesSupport = true; for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { @@ -8393,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.max_dispclk_freq_mhz, false, // DSCEnable mode_lib->ms.TotalNumberOfActiveDPP, + mode_lib->ms.TotalNumberOfActiveOPP, mode_lib->ip.max_num_dpp, + mode_lib->ip.max_num_opp, ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000), mode_lib->ms.support.NumberOfDSCSlices[k], @@ -8412,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.max_dispclk_freq_mhz, true, // DSCEnable mode_lib->ms.TotalNumberOfActiveDPP, + mode_lib->ms.TotalNumberOfActiveOPP, mode_lib->ip.max_num_dpp, + mode_lib->ip.max_num_opp, ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000), mode_lib->ms.support.NumberOfDSCSlices[k], @@ -8516,20 
+8529,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 1; + mode_lib->ms.NoOfOPP[k] = 1; if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 4; + mode_lib->ms.NoOfOPP[k] = 4; } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 3; + mode_lib->ms.NoOfOPP[k] = 3; } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 2; + mode_lib->ms.NoOfOPP[k] = 2; } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) { mode_lib->ms.MPCCombine[k] = true; mode_lib->ms.NoOfDPP[k] = 2; - mode_lib->ms.TotalNumberOfActiveDPP++; } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 1; @@ -8540,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) { mode_lib->ms.MPCCombine[k] = true; mode_lib->ms.NoOfDPP[k] = 2; - mode_lib->ms.TotalNumberOfActiveDPP++; } } #if defined(__DML_VBA_DEBUG__) @@ -8548,8 +8563,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out #endif } + mode_lib->ms.TotalNumberOfActiveDPP = 0; + mode_lib->ms.TotalNumberOfActiveOPP = 0; + for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { + mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k]; + mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k]; + } if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp) mode_lib->ms.support.TotalAvailablePipesSupport = false; + if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp) + mode_lib->ms.support.TotalAvailablePipesSupport = false; mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0; @@ -9028,11 +9051,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->ms.SwathWidthC; calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->ms.SwathHeightY; calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->ms.SwathHeightC; - calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; + calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; /* outputs */ - calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l; - calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c; + calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk]; + calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk]; calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params); @@ -9040,8 +9063,8 @@ static bool dml_core_mode_support(struct 
dml2_core_calcs_mode_support_ex *in_out calculate_excess_vactive_bandwidth_required( display_cfg, mode_lib->ms.num_active_planes, - s->pstate_bytes_required_l, - s->pstate_bytes_required_c, + s->pstate_bytes_required_l[dml2_pstate_type_uclk], + s->pstate_bytes_required_c[dml2_pstate_type_uclk], /* outputs */ mode_lib->ms.excess_vactive_fill_bw_l, mode_lib->ms.excess_vactive_fill_bw_c); @@ -9483,8 +9506,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out calculate_vactive_det_fill_latency( display_cfg, mode_lib->ms.num_active_planes, - s->pstate_bytes_required_l, - s->pstate_bytes_required_c, + s->pstate_bytes_required_l[dml2_pstate_type_uclk], + s->pstate_bytes_required_c[dml2_pstate_type_uclk], mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0, mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1, mode_lib->ms.vactive_sw_bw_l, @@ -9492,7 +9515,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.surface_avg_vactive_required_bw, mode_lib->ms.surface_peak_required_bw, /* outputs */ - mode_lib->ms.dram_change_vactive_det_fill_delay_us); + mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk]); #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us); @@ -10986,11 +11009,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->mp.SwathWidthC; calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->mp.SwathHeightY; calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->mp.SwathHeightC; - calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; + calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; /* outputs */ - calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l; - calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c; + calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk]; + calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk]; calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params); @@ -10998,8 +11021,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_excess_vactive_bandwidth_required( display_cfg, s->num_active_planes, - s->pstate_bytes_required_l, - s->pstate_bytes_required_c, + s->pstate_bytes_required_l[dml2_pstate_type_uclk], + s->pstate_bytes_required_c[dml2_pstate_type_uclk], /* outputs */ mode_lib->mp.excess_vactive_fill_bw_l, mode_lib->mp.excess_vactive_fill_bw_c); @@ -12756,7 +12779,7 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna { const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index]; const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index]; - const struct dml2_fams2_meta *stream_fams2_meta = 
&display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index]; + const struct dml2_pstate_meta *stream_pstate_meta = &display_cfg->stage3.stream_pstate_meta[plane_descriptor->stream_index]; struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base; union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state; @@ -12771,24 +12794,24 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* from display configuration */ base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total; base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total; - base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal - + base_programming->vblank_start = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch); - base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal - + base_programming->vblank_end = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch - stream_descriptor->timing.v_active); base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled; /* from meta */ base_programming->otg_vline_time_ns = - (unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0); - base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines; - base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines; - base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines; - base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal - + (unsigned int)(stream_pstate_meta->otg_vline_time_us * 1000.0); + base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_pstate_meta->scheduling_delay_otg_vlines; + base_programming->contention_delay_otg_vlines = (uint8_t)stream_pstate_meta->contention_delay_otg_vlines; + base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines; + base_programming->drr_keepout_otg_vline = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch - - stream_fams2_meta->method_drr.programming_delay_otg_vlines); - base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines; - base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal; + stream_pstate_meta->method_drr.programming_delay_otg_vlines); + base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_pstate_meta->allow_to_target_delay_otg_vlines; + base_programming->max_vtotal = (uint16_t)stream_pstate_meta->max_vtotal; /* from core */ base_programming->config.bits.min_ttu_vblank_usable = true; @@ -12807,11 +12830,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* legacy vactive */ base_programming->type = FAMS2_STREAM_TYPE_VACTIVE; sub_programming->legacy.vactive_det_fill_delay_otg_vlines = - (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_vactive.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - 
(uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_vactive.common.allow_end_otg_vline; base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_vblank: @@ -12819,22 +12842,22 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* legacy vblank */ base_programming->type = FAMS2_STREAM_TYPE_VBLANK; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_vblank.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_vblank.common.allow_end_otg_vline; base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_fw_drr: /* drr */ base_programming->type = FAMS2_STREAM_TYPE_DRR; sub_programming->drr.programming_delay_otg_vlines = - (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_drr.programming_delay_otg_vlines; sub_programming->drr.nom_stretched_vtotal = - (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal; + (uint16_t)stream_pstate_meta->method_drr.stretched_vtotal; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_drr.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_drr.common.allow_end_otg_vline; /* drr only clamps to vtotal min for single display */ base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1; sub_programming->drr.only_stretch_if_required = true; @@ -12847,13 +12870,13 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0); sub_programming->subvp.vratio_denominator = 1000; sub_programming->subvp.programming_delay_otg_vlines = - (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_subvp.programming_delay_otg_vlines; sub_programming->subvp.prefetch_to_mall_otg_vlines = - (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines; sub_programming->subvp.phantom_vtotal = - (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal; + (uint16_t)stream_pstate_meta->method_subvp.phantom_vtotal; sub_programming->subvp.phantom_vactive = - (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive; + (uint16_t)stream_pstate_meta->method_subvp.phantom_vactive; sub_programming->subvp.config.bits.is_multi_planar = plane_descriptor->surface.plane1.height > 0; sub_programming->subvp.config.bits.is_yuv420 = @@ -12862,9 +12885,9 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna plane_descriptor->pixel_format == dml2_420_12; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_subvp.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_subvp.common.allow_end_otg_vline; 
base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_reserved_hw: @@ -12920,7 +12943,8 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ out->active_latency_hiding_us = (int)mode_lib->ms.VActiveLatencyHidingUs[plane_idx]; - out->dram_change_vactive_det_fill_delay_us = (unsigned int)math_ceil(mode_lib->ms.dram_change_vactive_det_fill_delay_us[plane_idx]); + out->vactive_det_fill_delay_us[dml2_pstate_type_uclk] = + (unsigned int)math_ceil(mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk][plane_idx]); } void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index) @@ -13001,7 +13025,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState; out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize; out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits; - out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support; + out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.global_temp_read_or_ppt_supported; out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support; out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots; @@ -13027,7 +13051,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported; out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support; out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport; + out->informative.mode_support_info.NumberOfTDLUT33cubeSupport = mode_lib->ms.support.NumberOfTDLUT33cubeSupport; out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport; + out->informative.mode_support_info.qos_bandwidth_support = mode_lib->ms.support.qos_bandwidth_support; + out->informative.mode_support_info.dcfclk_support = mode_lib->ms.support.dcfclk_support; for (k = 0; k < out->display_config.num_planes; k++) { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h index 27ef0e096b25..27ef0e096b25 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c index 640087e862f8..cc4f0663c6d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c @@ -15,6 +15,8 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance memset(out, 0, sizeof(struct dml2_core_instance)); + out->project_id = project_id; + switch (project_id) { case 
dml2_project_dcn4x_stage1: result = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h index 411c514fe65c..411c514fe65c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h index ffb8c09f37a5..1087a8c926ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h @@ -36,7 +36,9 @@ struct dml2_core_ip_params { unsigned int max_line_buffer_lines; unsigned int writeback_interface_buffer_size_kbytes; unsigned int max_num_dpp; + unsigned int max_num_opp; unsigned int max_num_otg; + unsigned int TDLUT_33cube_count; unsigned int max_num_wb; unsigned int max_dchub_pscl_bw_pix_per_clk; unsigned int max_pscl_lb_bw_pix_per_clk; @@ -46,6 +48,7 @@ struct dml2_core_ip_params { double max_vscl_ratio; unsigned int max_hscl_taps; unsigned int max_vscl_taps; + unsigned int odm_combine_support_mask; unsigned int num_dsc; unsigned int maximum_dsc_bits_per_component; unsigned int maximum_pixels_per_line_per_dsc_unit; @@ -82,7 +85,6 @@ struct dml2_core_ip_params { unsigned int subvp_swath_height_margin_lines; unsigned int subvp_fw_processing_delay_us; unsigned int subvp_pstate_allow_width_us; - // MRQ bool dcn_mrq_present; unsigned int zero_size_buffer_entries; @@ -103,6 +105,8 @@ struct dml2_core_internal_DmlPipe { unsigned int DPPPerSurface; bool ScalerEnabled; bool UPSPEnabled; + unsigned int UPSPVTaps; + enum dml2_sample_positioning UPSPSamplePositioning; enum dml2_rotation_angle RotationAngle; bool mirrored; unsigned int ViewportHeight; @@ -230,6 +234,7 @@ struct dml2_core_internal_mode_support_info { bool MSOOrODMSplitWithNonDPLink; bool NotEnoughLanesForMSO; bool NumberOfOTGSupport; + bool NumberOfTDLUT33cubeSupport; bool NumberOfHDMIFRLSupport; bool NumberOfDP2p0Support; bool WritebackScaleRatioAndTapsSupport; @@ -259,8 +264,11 @@ struct dml2_core_internal_mode_support_info { bool DCCMetaBufferSizeNotExceeded; enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES]; enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES]; + enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES]; + bool global_dram_clock_change_support_required; bool global_dram_clock_change_supported; bool global_fclk_change_supported; + bool global_temp_read_or_ppt_supported; bool USRRetrainingSupport; bool AvgBandwidthSupport; bool UrgVactiveBandwidthSupport; @@ -331,7 +339,6 @@ struct dml2_core_internal_mode_support_info { bool incorrect_imall_usage; bool g6_temp_read_support; - bool temp_read_or_ppt_support; struct dml2_core_internal_watermarks watermarks; bool dcfclk_support; @@ -566,6 +573,7 @@ struct dml2_core_internal_mode_support { enum dml2_odm_mode ODMMode[DML2_MAX_PLANES]; unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES]; unsigned int NoOfDPP[DML2_MAX_PLANES]; + unsigned int NoOfOPP[DML2_MAX_PLANES]; bool MPCCombine[DML2_MAX_PLANES]; double dcfclk_deepsleep; double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES]; @@ -576,6 +584,7 @@ struct dml2_core_internal_mode_support { bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES]; bool 
DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES]; unsigned int TotalNumberOfActiveDPP; + unsigned int TotalNumberOfActiveOPP; unsigned int TotalNumberOfSingleDPPSurfaces; unsigned int TotalNumberOfDCCActiveDPP; unsigned int Total3dlutActive; @@ -584,7 +593,7 @@ struct dml2_core_internal_mode_support { double VActiveLatencyHidingMargin[DML2_MAX_PLANES]; double VActiveLatencyHidingUs[DML2_MAX_PLANES]; unsigned int MaxVStartupLines[DML2_MAX_PLANES]; - double dram_change_vactive_det_fill_delay_us[DML2_MAX_PLANES]; + double pstate_vactive_det_fill_delay_us[dml2_pstate_type_count][DML2_MAX_PLANES]; unsigned int num_mcaches_l[DML2_MAX_PLANES]; unsigned int mcache_row_bytes_l[DML2_MAX_PLANES]; @@ -614,8 +623,8 @@ struct dml2_core_internal_mode_support { unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES]; unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; @@ -639,7 +648,7 @@ struct dml2_core_internal_mode_support { unsigned int DSTYAfterScaler[DML2_MAX_PLANES]; unsigned int DSTXAfterScaler[DML2_MAX_PLANES]; - enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; + enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES]; }; /// @brief A mega structure that houses various info for model programming step. @@ -830,6 +839,7 @@ struct dml2_core_internal_mode_program { double max_urgent_latency_us; double df_response_time_us; + enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES]; // ------------------- // Output // ------------------- @@ -956,11 +966,12 @@ struct dml2_core_internal_mode_program { double MaxActiveFCLKChangeLatencySupported; bool USRRetrainingSupport; bool g6_temp_read_support; - bool temp_read_or_ppt_support; enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES]; enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES]; + enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES]; bool global_dram_clock_change_supported; bool global_fclk_change_supported; + bool global_temp_read_or_ppt_supported; double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES]; double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES]; double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES]; @@ -1127,8 +1138,8 @@ struct dml2_core_calcs_mode_support_locals { unsigned int cursor_bytes[DML2_MAX_PLANES]; bool stream_visited[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES]; double prefetch_sw_bytes[DML2_MAX_PLANES]; double Tpre_rounded[DML2_MAX_PLANES]; @@ -1219,8 +1230,8 @@ struct dml2_core_calcs_mode_programming_locals { double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; unsigned int per_pipe_flip_bytes[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES]; double 
prefetch_sw_bytes[DML2_MAX_PLANES]; double Tpre_rounded[DML2_MAX_PLANES]; @@ -1306,7 +1317,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params { unsigned int HostVMMinPageSize; unsigned int DCCMetaBufferSizeBytes; bool mrq_present; - enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; + enum dml2_pstate_method *uclk_pstate_switch_modes; // Output bool *PTEBufferSizeNotExceeded; @@ -1733,10 +1744,12 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param unsigned int max_request_size_bytes; unsigned int *meta_row_height_l; unsigned int *meta_row_height_c; + enum dml2_pstate_method *uclk_pstate_switch_modes; // Output struct dml2_core_internal_watermarks *Watermark; enum dml2_pstate_change_support *DRAMClockChangeSupport; + bool *global_dram_clock_change_support_required; bool *global_dram_clock_change_supported; double *MaxActiveDRAMClockChangeLatencySupported; unsigned int *SubViewportLinesNeededInMALL; @@ -1747,10 +1760,10 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param double *VActiveLatencyHidingMargin; double *VActiveLatencyHidingUs; bool *g6_temp_read_support; - bool *temp_read_or_ppt_support; + enum dml2_pstate_change_support *temp_read_or_ppt_support; + bool *global_temp_read_or_ppt_supported; }; - struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { const struct dml2_display_cfg *display_cfg; unsigned int ConfigReturnBufferSizeInKByte; @@ -2240,7 +2253,7 @@ struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params unsigned int *swath_width_c; unsigned int *swath_height_l; unsigned int *swath_height_c; - double latency_to_hide_us; + double latency_to_hide_us[DML2_MAX_PLANES]; /* outputs */ unsigned int *bytes_required_l; @@ -2308,6 +2321,7 @@ struct dml2_core_calcs_mode_support_ex { const struct dml2_display_cfg *in_display_cfg; const struct dml2_mcg_min_clock_table *min_clk_table; int min_clk_index; + enum dml2_project_id project_id; //unsigned int in_state_index; struct dml2_core_internal_mode_support_info *out_evaluation_info; }; @@ -2320,6 +2334,7 @@ struct dml2_core_calcs_mode_programming_ex { const struct dml2_mcg_min_clock_table *min_clk_table; const struct core_display_cfg_support_info *cfg_support_info; int min_clk_index; + enum dml2_project_id project_id; struct dml2_display_cfg_programming *programming; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c index 5f301befed16..b57d0f6ea6a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c @@ -306,6 +306,8 @@ void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mod DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); if (!fail_only || support->g6_temp_read_support == 0) DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support); + if (!fail_only || (support->global_dram_clock_change_supported == 0 && support->global_dram_clock_change_support_required)) + DML_LOG_VERBOSE("DML: support: dram_clock_change_support = %d\n", support->global_dram_clock_change_supported); if (!fail_only || support->ImmediateFlipSupport == 0) DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); if (!fail_only || support->LinkCapacitySupport == 0) diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h index 95f0d017add4..95f0d017add4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c index 22969a533a7b..22969a533a7b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h index e7b58f2efda4..e7b58f2efda4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c index dfd01440737d..dfd01440737d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h index 20ba2e446f1d..20ba2e446f1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c index a265f254152c..a265f254152c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h index 02da6f45cbf7..f54fde8fba90 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h @@ -10,4 +10,4 @@ bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out); bool mcg_dcn4_unit_test(void); -#endif +#endif
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c index c60b8fe90819..c60b8fe90819 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h index ad307deca3b0..ad307deca3b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c index 1b9579a32ff2..1b9579a32ff2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h index f00bd9e72a86..f00bd9e72a86 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index d88b3e0082dd..c26e100fcaf2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -642,6 +642,11 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) int i = 0; struct dml2_pmo_instance *pmo = in_out->instance; + unsigned int base_list_size = 0; + const struct dml2_pmo_pstate_strategy *base_list = NULL; + unsigned int *expanded_list_size = NULL; + struct dml2_pmo_pstate_strategy *expanded_list = NULL; + pmo->soc_bb = in_out->soc_bb; pmo->ip_caps = in_out->ip_caps; pmo->mpc_combine_limit = 2; @@ -656,53 +661,71 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) pmo->options = in_out->options; /* generate permutations of p-state configs from base strategy list */ - for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) { - switch (i) { + for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) { + switch (i+1) { case 1: - DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); - - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_1_display, - base_strategy_list_1_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_1_display; + base_list_size = base_strategy_list_1_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display; + break; case 2: - DML_ASSERT(base_strategy_list_2_display_size 
<= PMO_DCN4_MAX_BASE_STRATEGIES); - - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_2_display, - base_strategy_list_2_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_2_display; + base_list_size = base_strategy_list_2_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display; + break; case 3: - DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); - - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_3_display, - base_strategy_list_3_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_3_display; + base_list_size = base_strategy_list_3_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display; + break; case 4: - DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); - - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_4_display, - base_strategy_list_4_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_4_display; + base_list_size = base_strategy_list_4_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display; + break; } + + DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + + /* populate list */ + pmo_dcn4_fams2_expand_base_pstate_strategies( + base_list, + base_list_size, + i + 1, + expanded_list, + expanded_list_size); } return true; @@ -1026,13 +1049,13 @@ static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo, return synchronizable; } -static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta) +static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta) { - return fams2_meta->contention_delay_otg_vlines + - fams2_meta->method_subvp.programming_delay_otg_vlines + - fams2_meta->method_subvp.phantom_vtotal + - fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + - fams2_meta->dram_clk_change_blackout_otg_vlines; + return pstate_meta->contention_delay_otg_vlines + + pstate_meta->method_subvp.programming_delay_otg_vlines + + pstate_meta->method_subvp.phantom_vtotal + + 
pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + + pstate_meta->blackout_otg_vlines; } static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo, @@ -1042,29 +1065,29 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo, unsigned int i; for (i = 0; i < DML2_MAX_PLANES; i++) { const struct dml2_stream_parameters *stream_descriptor; - const struct dml2_fams2_meta *stream_fams2_meta; + const struct dml2_pstate_meta *stream_pstate_meta; if (is_bit_set_in_bitfield(mask, i)) { stream_descriptor = &display_config->display_config.stream_descriptors[i]; - stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i]; + stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i]; if (!stream_descriptor->timing.drr_config.enabled) return false; /* cannot support required vtotal */ - if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) { + if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) { return false; } /* check rr is within bounds */ - if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min || - stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) { + if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min || + stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) { return false; } /* check required stretch is allowed */ if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 && - stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) { + stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > (int)stream_descriptor->timing.drr_config.max_instant_vtotal_delta) { return false; } } @@ -1079,7 +1102,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, { const struct dml2_stream_parameters *stream_descriptor; const struct dml2_plane_parameters *plane_descriptor; - const struct dml2_fams2_meta *stream_fams2_meta; + const struct dml2_pstate_meta *stream_pstate_meta; unsigned int microschedule_vlines; unsigned int i; unsigned int mcaches_per_plane; @@ -1124,13 +1147,13 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, for (i = 0; i < DML2_MAX_PLANES; i++) { if (is_bit_set_in_bitfield(mask, i)) { stream_descriptor = &display_config->display_config.stream_descriptors[i]; - stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i]; + stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i]; if (stream_descriptor->overrides.disable_subvp) { return false; } - microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]); + microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]); /* block if using an interlaced timing */ if (stream_descriptor->timing.interlaced) { @@ -1141,8 +1164,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, * 2) refresh rate must be within the allowed bounds */ if (microschedule_vlines >= stream_descriptor->timing.v_active || - (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min || - stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) { + (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min || + 
stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) { return false; } } @@ -1232,43 +1255,43 @@ static bool all_planes_match_method(const struct display_configuation_with_meta } static void build_method_scheduling_params( - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta, - struct dml2_fams2_meta *stream_fams2_meta) + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta, + struct dml2_pstate_meta *stream_pstate_meta) { - stream_method_fams2_meta->allow_time_us = - (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) * - stream_fams2_meta->otg_vline_time_us; - if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) { + stream_method_pstate_meta->allow_time_us = + (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) * + stream_pstate_meta->otg_vline_time_us; + if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) { /* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this)*/ - stream_method_fams2_meta->disallow_time_us = 0.0; + stream_method_pstate_meta->disallow_time_us = 0.0; } else { - stream_method_fams2_meta->disallow_time_us = - stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us; + stream_method_pstate_meta->disallow_time_us = + stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us; } } -static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta( +static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta( struct dml2_pmo_instance *pmo, enum dml2_pstate_method stream_pstate_method, int stream_idx) { - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL; + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL; switch (stream_pstate_method) { case dml2_pstate_method_vactive: case dml2_pstate_method_fw_vactive_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common; break; case dml2_pstate_method_vblank: case dml2_pstate_method_fw_vblank_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common; break; case dml2_pstate_method_fw_svp: case dml2_pstate_method_fw_svp_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common; break; case dml2_pstate_method_fw_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common; break; case dml2_pstate_method_reserved_hw: case dml2_pstate_method_reserved_fw: @@ -1277,10 +1300,10 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta( case dml2_pstate_method_count: case dml2_pstate_method_na: default: - stream_method_fams2_meta = NULL; + stream_method_pstate_meta = NULL; } - return stream_method_fams2_meta; + return stream_method_pstate_meta; } static bool is_timing_group_schedulable( @@ -1288,10 +1311,10 
@@ static bool is_timing_group_schedulable( const struct display_configuation_with_meta *display_cfg, const struct dml2_pmo_pstate_strategy *pstate_strategy, const unsigned int timing_group_idx, - struct dml2_fams2_per_method_common_meta *group_fams2_meta) + struct dml2_pstate_per_method_common_meta *group_pstate_meta) { unsigned int i; - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta; + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta; unsigned int base_stream_idx = 0; struct dml2_pmo_scratch *s = &pmo->scratch; @@ -1305,31 +1328,31 @@ static bool is_timing_group_schedulable( } /* init allow start and end lines for timing group */ - stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx); - if (!stream_method_fams2_meta) + stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx); + if (!stream_method_pstate_meta) return false; - group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline; - group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline; - group_fams2_meta->period_us = stream_method_fams2_meta->period_us; + group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline; + group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline; + group_pstate_meta->period_us = stream_method_pstate_meta->period_us; for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) { if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) { - stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i); - if (!stream_method_fams2_meta) + stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i); + if (!stream_method_pstate_meta) continue; - if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) { + if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) { /* set group allow start to larger otg vline */ - group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline; + group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline; } - if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) { + if (group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) { /* set group allow end to smaller otg vline */ - group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline; + group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline; } /* check waveform still has positive width */ - if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) { + if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) { /* timing group is not schedulable */ return false; } @@ -1337,10 +1360,10 @@ static bool is_timing_group_schedulable( } /* calculate the rest of the meta */ - build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]); + build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]); - return group_fams2_meta->allow_time_us > 0.0 && 
- group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us; + return group_pstate_meta->allow_time_us > 0.0 && + group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us; } static bool is_config_schedulable( @@ -1354,7 +1377,7 @@ static bool is_config_schedulable( double max_allow_delay_us = 0.0; - memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta)); + memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta)); memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES); /* search for a general solution to the schedule */ @@ -1369,12 +1392,12 @@ static bool is_config_schedulable( for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) { s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i; s->pmo_dcn4.sorted_group_gtl_period_index[i] = i; - if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) { + if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) { /* synchronized timing group was not schedulable */ schedulable = false; break; } - max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us; + max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us; } if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) { @@ -1391,8 +1414,8 @@ static bool is_config_schedulable( bool swapped = false; for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) { - double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us; - double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us; + double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us; + double jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us; if (j_disallow_us < jp1_disallow_us) { /* swap as A < B */ swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j], @@ -1410,19 +1433,19 @@ static bool is_config_schedulable( * other display, or when >2 streams continue to halve the remaining allow time. 
*/ for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) { - if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) { + if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) { /* this timing group always allows */ continue; } - double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us; + double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us; for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) { unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j]; /* stream can't overlap itself */ - if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) { + if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) { max_allow_time_us = math_min2( - s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us, - (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2); + s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us, + (max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2); if (max_allow_time_us < 0.0) { /* failed exit early */ @@ -1450,8 +1473,8 @@ static bool is_config_schedulable( bool swapped = false; for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) { - double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us; - double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us; + double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us; + double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us; if (j_period_us < jp1_period_us) { /* swap as A < B */ swap(s->pmo_dcn4.sorted_group_gtl_period_index[j], @@ -1470,7 +1493,7 @@ static bool is_config_schedulable( unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i]; unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1]; - if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us || + if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us || (s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) { schedulable = false; break; @@ -1492,18 +1515,18 @@ static bool is_config_schedulable( /* default period_0 > period_1 */ unsigned int lrg_idx = 0; unsigned int sml_idx = 1; - if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) { + if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) { /* period_0 < period_1 */ lrg_idx = 1; sml_idx = 0; } - period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us; - shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio)); - max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us; - max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us; + period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us; + shift_per_period 
= s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio)); + max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us; + max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us; if (shift_per_period > 0.0 && - shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us && + shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) { schedulable = true; } @@ -1646,22 +1669,22 @@ static int get_vactive_pstate_margin(const struct display_configuation_with_meta return min_vactive_margin_us; } -static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask) +static int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask) { unsigned char i; - unsigned int max_vactive_fill_us = 0; + int max_vactive_fill_us = 0; for (i = 0; i < DML2_MAX_PLANES; i++) { if (is_bit_set_in_bitfield(plane_mask, i)) { - if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us > max_vactive_fill_us) - max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us; + if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk] > max_vactive_fill_us) + max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk]; } } return max_vactive_fill_us; } -static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, +static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo, struct display_configuation_with_meta *display_config, int stream_index) { @@ -1669,7 +1692,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index]; const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index]; const struct dml2_timing_cfg *timing = &stream_descriptor->timing; - struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index]; /* worst case all other streams require some programming at the same time, 0 if only 1 stream */ unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us + @@ -1677,142 +1700,142 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, (display_config->display_config.num_streams - 1); /* common */ - stream_fams2_meta->valid = true; - stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0; - stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active; - stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / - (stream_fams2_meta->nom_vtotal * timing->h_total); - stream_fams2_meta->nom_frame_time_us = - 
(double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us; - stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active; + stream_pstate_meta->valid = true; + stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0; + stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active; + stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / + (stream_pstate_meta->nom_vtotal * timing->h_total); + stream_pstate_meta->nom_frame_time_us = + (double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us; + stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active; if (stream_descriptor->timing.drr_config.enabled == true) { if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) { - stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / + stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / ((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9); } else { /* assume min of 48Hz */ - stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / + stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / (48000000.0 * stream_descriptor->timing.h_total) * 1e9); } } else { - stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal; - } - stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / - (stream_fams2_meta->max_vtotal * timing->h_total); - stream_fams2_meta->max_frame_time_us = - (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us; - - stream_fams2_meta->scheduling_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->contention_delay_otg_vlines = - (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal; + } + stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / + (stream_pstate_meta->max_vtotal * timing->h_total); + stream_pstate_meta->max_frame_time_us = + (double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us; + + stream_pstate_meta->scheduling_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->contention_delay_otg_vlines = + (unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us); /* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */ - stream_fams2_meta->allow_to_target_delay_otg_vlines = - (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1; - stream_fams2_meta->min_allow_width_otg_vlines = - (unsigned 
int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->allow_to_target_delay_otg_vlines = + (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1; + stream_pstate_meta->min_allow_width_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us); /* this value should account for urgent latency */ - stream_fams2_meta->dram_clk_change_blackout_otg_vlines = + stream_pstate_meta->blackout_otg_vlines = (unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us / - stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->otg_vline_time_us); /* scheduling params should be built based on the worst case for allow_time:disallow_time */ /* vactive */ if (display_config->display_config.num_streams == 1) { /* for single stream, guarantee at least an instant of allow */ - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor( + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor( math_max2(0.0, - timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines)); + timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines)); } else { /* for multi stream, bound to a max fill time defined by IP caps */ - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = - (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = + (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us); } - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us; + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_pstate_meta->otg_vline_time_us; - if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) { - stream_fams2_meta->method_vactive.common.allow_start_otg_vline = - timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; - stream_fams2_meta->method_vactive.common.allow_end_otg_vline = - stream_fams2_meta->vblank_start - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; + if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) { + stream_pstate_meta->method_vactive.common.allow_start_otg_vline = + timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; + stream_pstate_meta->method_vactive.common.allow_end_otg_vline = + stream_pstate_meta->vblank_start - + stream_pstate_meta->blackout_otg_vlines; } else { - stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0; - stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0; + stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0; + stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0; } - stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us; - 
build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta); + stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta); /* vblank */ - stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start; - stream_fams2_meta->method_vblank.common.allow_end_otg_vline = - stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1; - stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us; - build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta); + stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start; + stream_pstate_meta->method_vblank.common.allow_end_otg_vline = + stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1; + stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta); /* subvp */ - stream_fams2_meta->method_subvp.programming_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.phantom_vactive = - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + + stream_pstate_meta->method_subvp.programming_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.phantom_vactive = + stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + stream_info->phantom_min_v_active; - stream_fams2_meta->method_subvp.phantom_vfp = - stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines; + stream_pstate_meta->method_subvp.phantom_vfp = + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines; /* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive)*/ - stream_fams2_meta->method_subvp.phantom_vtotal = + stream_pstate_meta->method_subvp.phantom_vtotal = stream_info->phantom_v_startup + - stream_fams2_meta->method_subvp.phantom_vfp + + stream_pstate_meta->method_subvp.phantom_vfp + 1 + - stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines + - stream_fams2_meta->method_subvp.phantom_vactive; - stream_fams2_meta->method_subvp.common.allow_start_otg_vline = + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines + + stream_pstate_meta->method_subvp.phantom_vactive; + stream_pstate_meta->method_subvp.common.allow_start_otg_vline = 
stream_descriptor->timing.v_blank_end + - stream_fams2_meta->contention_delay_otg_vlines + - stream_fams2_meta->method_subvp.programming_delay_otg_vlines + - stream_fams2_meta->method_subvp.phantom_vtotal + - stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + - stream_fams2_meta->allow_to_target_delay_otg_vlines; - stream_fams2_meta->method_subvp.common.allow_end_otg_vline = - stream_fams2_meta->vblank_start - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; - stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us; - build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta); + stream_pstate_meta->contention_delay_otg_vlines + + stream_pstate_meta->method_subvp.programming_delay_otg_vlines + + stream_pstate_meta->method_subvp.phantom_vtotal + + stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + + stream_pstate_meta->allow_to_target_delay_otg_vlines; + stream_pstate_meta->method_subvp.common.allow_end_otg_vline = + stream_pstate_meta->vblank_start - + stream_pstate_meta->blackout_otg_vlines; + stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta); /* drr */ - stream_fams2_meta->method_drr.programming_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_drr.common.allow_start_otg_vline = - stream_fams2_meta->vblank_start + - stream_fams2_meta->allow_to_target_delay_otg_vlines; - stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us; + stream_pstate_meta->method_drr.programming_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_drr.common.allow_start_otg_vline = + stream_pstate_meta->vblank_start + + stream_pstate_meta->allow_to_target_delay_otg_vlines; + stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us; if (display_config->display_config.num_streams <= 1) { /* only need to stretch vblank for blackout time */ - stream_fams2_meta->method_drr.stretched_vtotal = - stream_fams2_meta->nom_vtotal + - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; + stream_pstate_meta->method_drr.stretched_vtotal = + stream_pstate_meta->nom_vtotal + + stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + + stream_pstate_meta->blackout_otg_vlines; } else { /* multi display needs to always be schedulable */ - stream_fams2_meta->method_drr.stretched_vtotal = - stream_fams2_meta->nom_vtotal * 2 + - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; - } - stream_fams2_meta->method_drr.common.allow_end_otg_vline = - stream_fams2_meta->method_drr.stretched_vtotal - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; - build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta); + stream_pstate_meta->method_drr.stretched_vtotal = + stream_pstate_meta->nom_vtotal * 2 + + stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + + 
stream_pstate_meta->blackout_otg_vlines; + } + stream_pstate_meta->method_drr.common.allow_end_otg_vline = + stream_pstate_meta->method_drr.stretched_vtotal - + stream_pstate_meta->blackout_otg_vlines; + build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta); } static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo, @@ -1820,14 +1843,14 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo, int stream_index) { struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index]; - struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index]; stream_svp_meta->valid = true; /* PMO FAMS2 precaulcates these values */ - stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive; - stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp; - stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal; + stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive; + stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp; + stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal; } bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out) @@ -1879,7 +1902,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index); /* FAMS2 meta */ - build_fams2_meta_per_stream(pmo, display_config, stream_index); + build_pstate_meta_per_stream(pmo, display_config, stream_index); /* SVP meta */ build_subvp_meta_per_stream(pmo, display_config, stream_index); @@ -1939,9 +1962,6 @@ static void reset_display_configuration(struct display_configuation_with_meta *d for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) { display_config->stage3.stream_svp_meta[stream_index].valid = false; - - display_config->display_config.stream_descriptors[stream_index].overrides.minimize_active_latency_hiding = false; - display_config->display_config.overrides.best_effort_min_active_latency_hiding_us = 0; } for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { @@ -1974,7 +1994,6 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta * plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr; display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr; - } } } @@ -2040,7 +2059,6 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met plane->overrides.reserved_vblank_time_ns); display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank; - } } } @@ -2055,6 +2073,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { if (is_bit_set_in_bitfield(plane_mask, plane_index)) { plane = &display_config->display_config.plane_descriptors[plane_index]; + plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000); display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr; @@ -2076,8 +2095,8 @@ static void 
setup_planes_for_vactive_by_mask(struct display_configuation_with_me display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive; if (!pmo->options->disable_vactive_det_fill_bw_pad) { - display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us = - (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); + display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] = + (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); } } } @@ -2097,8 +2116,8 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr; if (!pmo->options->disable_vactive_det_fill_bw_pad) { - display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us = - (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); + display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] = + (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); } } } @@ -2144,9 +2163,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_ /* copy FAMS2 meta */ if (success) { display_config->stage3.fams2_required = fams2_required; - memcpy(&display_config->stage3.stream_fams2_meta, - &scratch->pmo_dcn4.stream_fams2_meta, - sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES); + memcpy(&display_config->stage3.stream_pstate_meta, + &scratch->pmo_dcn4.stream_pstate_meta, + sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES); } return success; @@ -2188,12 +2207,12 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp return false; for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) { - struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index]; if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive || s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) { if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) || - get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) { + get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) { p_state_supported = false; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h index 6baab7ad6ecc..6baab7ad6ecc 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 55d2464365d0..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
index 7218de1824cc..b90f6263cd85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -10,4 +10,4 @@ bool dml2_pmo_create(enum dml2_project_id project_id,
 struct dml2_pmo_instance *out);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
index e17b5ceba447..e17b5ceba447 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
index e13b0c5939b0..e13b0c5939b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
index 5a33e2f357f4..5a33e2f357f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
index 5e14d85821e2..5e14d85821e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
index 14d0ae03dce6..14d0ae03dce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
index 4a7c4c62111e..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
index 53bd8602f9ef..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
index 611c80f4f1bf..611c80f4f1bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index d52aa82283b3..1a6c0727cd2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -152,7 +152,7 @@ struct core_plane_support_info {
 int active_latency_hiding_us;
 int mall_svp_size_requirement_ways;
 int
nominal_vblank_pstate_latency_hiding_us; - unsigned int dram_change_vactive_det_fill_delay_us; + int vactive_det_fill_delay_us[dml2_pstate_type_count]; }; struct core_stream_support_info { @@ -209,6 +209,7 @@ struct dml2_core_mode_support_result { unsigned int uclk_pstate_supported; unsigned int fclk_pstate_supported; + struct dml2_core_internal_watermarks watermarks; } global; struct { @@ -255,56 +256,70 @@ struct dml2_implicit_svp_meta { unsigned long v_front_porch; }; -struct dml2_fams2_per_method_common_meta { +struct dml2_pstate_per_method_common_meta { /* generic params */ - unsigned int allow_start_otg_vline; - unsigned int allow_end_otg_vline; + int allow_start_otg_vline; + int allow_end_otg_vline; /* scheduling params */ double allow_time_us; double disallow_time_us; double period_us; }; -struct dml2_fams2_meta { +struct dml2_pstate_meta { bool valid; double otg_vline_time_us; - unsigned int scheduling_delay_otg_vlines; - unsigned int vertical_interrupt_ack_delay_otg_vlines; - unsigned int allow_to_target_delay_otg_vlines; - unsigned int contention_delay_otg_vlines; - unsigned int min_allow_width_otg_vlines; - unsigned int nom_vtotal; - unsigned int vblank_start; + int scheduling_delay_otg_vlines; + int vertical_interrupt_ack_delay_otg_vlines; + int allow_to_target_delay_otg_vlines; + int contention_delay_otg_vlines; + int min_allow_width_otg_vlines; + int nom_vtotal; + int vblank_start; double nom_refresh_rate_hz; double nom_frame_time_us; - unsigned int max_vtotal; + int max_vtotal; double min_refresh_rate_hz; double max_frame_time_us; - unsigned int dram_clk_change_blackout_otg_vlines; + int blackout_otg_vlines; + int max_allow_delay_otg_vlines; + double nom_vblank_time_us; struct { double max_vactive_det_fill_delay_us; - unsigned int max_vactive_det_fill_delay_otg_vlines; - struct dml2_fams2_per_method_common_meta common; + double vactive_latency_hiding_us; + double reserved_vblank_required_us; + int max_vactive_det_fill_delay_otg_vlines; + int reserved_blank_required_vlines; + struct dml2_pstate_per_method_common_meta common; } method_vactive; struct { - struct dml2_fams2_per_method_common_meta common; + struct dml2_pstate_per_method_common_meta common; } method_vblank; struct { - unsigned int programming_delay_otg_vlines; - unsigned int df_throttle_delay_otg_vlines; - unsigned int prefetch_to_mall_delay_otg_vlines; + int programming_delay_otg_vlines; + int df_throttle_delay_otg_vlines; + int prefetch_to_mall_delay_otg_vlines; unsigned long phantom_vactive; unsigned long phantom_vfp; unsigned long phantom_vtotal; - struct dml2_fams2_per_method_common_meta common; + struct dml2_pstate_per_method_common_meta common; } method_subvp; struct { - unsigned int programming_delay_otg_vlines; - unsigned int stretched_vtotal; - struct dml2_fams2_per_method_common_meta common; + int programming_delay_otg_vlines; + int stretched_vtotal; + struct dml2_pstate_per_method_common_meta common; } method_drr; }; +/* mask of synchronized timings by stream index */ +struct dml2_pmo_synchronized_timing_groups { + unsigned int num_timing_groups; + unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES]; + bool group_is_drr_enabled[DML2_MAX_PLANES]; + bool group_is_drr_active[DML2_MAX_PLANES]; + double group_line_time_us[DML2_MAX_PLANES]; +}; + struct dml2_optimization_stage3_state { bool performed; bool success; @@ -319,7 +334,7 @@ struct dml2_optimization_stage3_state { // Meta-data for FAMS2 bool fams2_required; - struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES]; + struct 
dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES]; int min_clk_index_for_latency; }; @@ -472,6 +487,7 @@ struct dml2_core_scratch { }; struct dml2_core_instance { + enum dml2_project_id project_id; struct dml2_mcg_min_clock_table *minimum_clock_table; struct dml2_core_internal_state_inputs inputs; struct dml2_core_internal_state_intermediates intermediates; @@ -619,6 +635,12 @@ struct dml2_pmo_optimize_for_stutter_in_out { #define PMO_DCN4_MAX_NUM_VARIANTS 2 #define PMO_DCN4_MAX_BASE_STRATEGIES 10 +struct dml2_scheduling_check_locals { + struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES]; + unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES]; + unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES]; +}; + struct dml2_pmo_scratch { union { struct { @@ -648,7 +670,7 @@ struct dml2_pmo_scratch { // Stores all the implicit SVP meta information indexed by stream index of the display // configuration under inspection, built at optimization stage init struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES]; - struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES]; + struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES]; unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE]; unsigned int num_stutter_candidates; @@ -663,7 +685,7 @@ struct dml2_pmo_scratch { double group_line_time_us[DML2_MAX_PLANES]; /* scheduling check locals */ - struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES]; + struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES]; unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES]; unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES]; double group_phase_offset[DML2_MAX_PLANES]; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c index 4cfe64aa8492..4cfe64aa8492 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h index 1538b708d8be..1538b708d8be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h index 7ca7f2a743c2..7ca7f2a743c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h index 140ec01545db..55b3e3ca54f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h @@ -23,7 +23,7 @@ * Authors: AMD * */ - + #ifndef __DML2_INTERNAL_TYPES_H__ #define __DML2_INTERNAL_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c index c59f825cfae9..66040c877d68 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c @@ -24,6 +24,7 @@ * */ + #include "dml2_dc_types.h" #include "dml2_internal_types.h" #include "dml2_utils.h" diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h index 9d64851f54e7..9d64851f54e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c index ef693f608d59..ef693f608d59 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h index e83e05248592..e83e05248592 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c index 3b866e876bf4..d834cb595afa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c @@ -301,6 +301,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0; break; + case dml_project_dcn401: out->pct_ideal_fabric_bw_after_urgent = 76; //67; out->max_avg_sdp_bw_use_normal_percent = 75; //80; @@ -424,6 +425,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, p->in_states->state_array[1].dcfclk_mhz = 1434.0; p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock; break; + + case dml_project_dcn401: p->in_states->num_states = 2; transactions_per_mem_clock = 16; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h index d764773938f4..d764773938f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c index 9a33158b63bf..9a33158b63bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h index 04fcfe637119..04fcfe637119 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c index 9deb03a18ccc..9deb03a18ccc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h index c384e141cebc..c384e141cebc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h index 17f0972b1af7..17f0972b1af7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h 
b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h index f7d30b47beff..d459f93cf40b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h @@ -31,3 +31,4 @@ */ #include "os_types.h" #include "cmntypes.h" + diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c index 00d22e542469..00d22e542469 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h index bf491cf0582d..bf491cf0582d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h index 2a2f84e07ca8..7fadbe6d7af4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h @@ -23,6 +23,7 @@ * Authors: AMD * */ + #ifndef __DML_LOGGING_H__ #define __DML_LOGGING_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c index 01480a04f85e..ce91e5d28956 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c @@ -199,6 +199,8 @@ void dpp_reset(struct dpp *dpp_base) memset(&dpp->scl_data, 0, sizeof(dpp->scl_data)); memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data)); + + dpp_base->cursor_offload = false; } @@ -484,10 +486,12 @@ void dpp1_set_cursor_position( cur_en = 0; /* not visible beyond top edge*/ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) { - REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); - - dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + if (!dpp_base->cursor_offload) + REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); } + + dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en; } void dpp1_cnv_set_optional_cursor_attributes( @@ -497,8 +501,13 @@ void dpp1_cnv_set_optional_cursor_attributes( struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); if (attr) { - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); + } + + dpp_base->att.fp_scale_bias.bits.fp_bias = attr->bias; + dpp_base->att.fp_scale_bias.bits.fp_scale = attr->scale; } } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h index f466182963f7..b12f34345a58 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h @@ -1348,7 +1348,8 @@ struct dcn_dpp_mask { uint32_t CURSOR0_COLOR1; \ uint32_t DPP_CONTROL; \ uint32_t CM_HDR_MULT_COEF; \ - uint32_t CURSOR0_FP_SCALE_BIAS; + uint32_t CURSOR0_FP_SCALE_BIAS; \ + uint32_t OBUF_CONTROL; struct dcn_dpp_registers { DPP_COMMON_REG_VARIABLE_LIST @@ -1450,7 +1451,6 @@ void dpp1_set_degamma( void dpp1_set_degamma_pwl(struct dpp *dpp_base, const struct pwl_params *params); - void dpp_read_state(struct dpp *dpp_base, struct 
dcn_dpp_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 4f569cd8a5d6..ef4a16117181 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -84,6 +84,22 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) } } +void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + dpp_reg_state->recout_start = REG_READ(RECOUT_START); + dpp_reg_state->recout_size = REG_READ(RECOUT_SIZE); + dpp_reg_state->scl_horz_filter_scale_ratio = REG_READ(SCL_HORZ_FILTER_SCALE_RATIO); + dpp_reg_state->scl_vert_filter_scale_ratio = REG_READ(SCL_VERT_FILTER_SCALE_RATIO); + dpp_reg_state->scl_mode = REG_READ(SCL_MODE); + dpp_reg_state->cm_control = REG_READ(CM_CONTROL); + dpp_reg_state->dpp_control = REG_READ(DPP_CONTROL); + dpp_reg_state->dscl_control = REG_READ(DSCL_CONTROL); + dpp_reg_state->obuf_control = REG_READ(OBUF_CONTROL); + dpp_reg_state->mpc_size = REG_READ(MPC_SIZE); +} + /*program post scaler scs block in dpp CM*/ void dpp3_program_post_csc( struct dpp *dpp_base, @@ -396,17 +412,21 @@ void dpp3_set_cursor_attributes( } } - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); + if (!dpp_base->cursor_offload) + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); if (color_format == CURSOR_MODE_MONO) { /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); + + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } } dpp_base->att.cur0_ctl.bits.expansion_mode = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index f236824126e9..d4a70b4379ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -594,6 +594,8 @@ void dpp3_program_CM_dealpha( void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); +void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state); + bool dpp3_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c index fa67e54bf94e..8a5aa5e86850 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c @@ -134,6 +134,7 @@ static struct dpp_funcs dcn32_dpp_funcs = { .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, + .dpp_read_reg_state = dpp30_read_reg_state, }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index f7a373a3d70a..977d83bf7741 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -95,6 +95,7 @@ void dpp35_program_bias_and_scale_fcnv( static struct dpp_funcs dcn35_dpp_funcs = { .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, .dpp_read_state = dpp30_read_state, + .dpp_read_reg_state = 
dpp30_read_reg_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 36187f890d5d..96c2c853de42 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -248,6 +248,7 @@ static struct dpp_funcs dcn401_dpp_funcs = { .set_optional_cursor_attributes = dpp401_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_read_reg_state = dpp30_read_reg_state, .set_cursor_matrix = dpp401_set_cursor_matrix, }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 7aab77b58869..62bf7cea21d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -103,17 +103,21 @@ void dpp401_set_cursor_attributes( } } - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); + if (!dpp_base->cursor_offload) + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); if (color_format == CURSOR_MODE_MONO) { /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); + + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } } dpp_base->att.cur0_ctl.bits.expansion_mode = 0; @@ -132,10 +136,12 @@ void dpp401_set_cursor_position( uint32_t cur_en = pos->enable ? 
1 : 0; if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) { - REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); - - dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + if (!dpp_base->cursor_offload) + REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); } + + dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en; } void dpp401_set_optional_cursor_attributes( @@ -145,10 +151,17 @@ void dpp401_set_optional_cursor_attributes( struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); if (attr) { - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale); + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale); + } + + dpp_base->att.fp_scale_bias_g_y.bits.fp_bias_g_y = attr->bias; + dpp_base->att.fp_scale_bias_g_y.bits.fp_scale_g_y = attr->scale; + dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb = attr->bias; + dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb = attr->scale; } } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c index 89f0d999bf35..242f1e6f0d8f 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c @@ -35,6 +35,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const static const struct dsc_funcs dcn20_dsc_funcs = { .dsc_get_enc_caps = dsc2_get_enc_caps, .dsc_read_state = dsc2_read_state, + .dsc_read_reg_state = dsc2_read_reg_state, .dsc_validate_stream = dsc2_validate_stream, .dsc_set_config = dsc2_set_config, .dsc_get_packed_pps = dsc2_get_packed_pps, @@ -155,6 +156,13 @@ void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); } +void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state) +{ + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + + dccg_reg_state->dsc_top_control = REG_READ(DSC_TOP_CONTROL); + dccg_reg_state->dscc_interrupt_control_status = REG_READ(DSCC_INTERRUPT_CONTROL_STATUS); +} bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg) { @@ -407,7 +415,7 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; // Need to find the ceiling value for the slice width - dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; + dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dsc_padding + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. 
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h index a9c04fc95bd1..2337c3a97235 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h @@ -606,6 +606,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, uint8_t *dsc_packed_pps); void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); +void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state); bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c index 6f4f5a3c4861..e712985f7abd 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c @@ -28,10 +28,11 @@ #include "reg_helper.h" static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe); +static void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); static const struct dsc_funcs dcn35_dsc_funcs = { - .dsc_get_enc_caps = dsc2_get_enc_caps, .dsc_read_state = dsc2_read_state, + .dsc_read_reg_state = dsc2_read_reg_state, .dsc_validate_stream = dsc2_validate_stream, .dsc_set_config = dsc2_set_config, .dsc_get_packed_pps = dsc2_get_packed_pps, @@ -39,6 +40,7 @@ static const struct dsc_funcs dcn35_dsc_funcs = { .dsc_disable = dsc2_disable, .dsc_disconnect = dsc2_disconnect, .dsc_wait_disconnect_pending_clear = dsc2_wait_disconnect_pending_clear, + .dsc_get_single_enc_caps = dsc35_get_single_enc_caps, }; /* Macro definitios for REG_SET macros*/ @@ -110,3 +112,31 @@ void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable) { REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable); } + +void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz) +{ + dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ + + dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1; + + dsc_enc_caps->lb_bit_depth = 13; + dsc_enc_caps->is_block_pred_supported = true; + + dsc_enc_caps->color_formats.bits.RGB = 1; + dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; + dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; + + dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1; + dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1; + dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1; + + dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000; + + dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ + dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ +} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c index 7bd92ae8b13e..c1bdbb38c690 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c +++ 
b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c @@ -26,6 +26,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = { .dsc_disconnect = dsc401_disconnect, .dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear, .dsc_get_single_enc_caps = dsc401_get_single_enc_caps, + .dsc_read_reg_state = dsc2_read_reg_state }; /* Macro definitios for REG_SET macros*/ diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index b0bd1f9425b5..81c83d5fe042 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -41,6 +41,7 @@ struct dsc_config { enum dc_color_depth color_depth; /* Bits per component */ bool is_odm; struct dc_dsc_config dc_dsc_cfg; + uint32_t dsc_padding; }; @@ -65,6 +66,10 @@ struct dcn_dsc_state { uint32_t dsc_opp_source; }; +struct dcn_dsc_reg_state { + uint32_t dsc_top_control; + uint32_t dscc_interrupt_control_status; +}; /* DSC encoder capabilities * They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps. @@ -99,6 +104,7 @@ struct dsc_enc_caps { struct dsc_funcs { void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); + void (*dsc_read_reg_state)(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state); bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c index e7e5f6d4778e..181a93dc46e6 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c @@ -440,33 +440,15 @@ void hubbub3_init_watermarks(struct hubbub *hubbub) REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg); } -void hubbub3_get_det_sizes(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes) +void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0], - DET0_SIZE, &target_det_sizes[0]); - - REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1], - DET1_SIZE, &target_det_sizes[1]); - - REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2], - DET2_SIZE, &target_det_sizes[2]); - - REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3], - DET3_SIZE, &target_det_sizes[3]); - -} - -uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t compbuf_config_error = 0; - - REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, - &compbuf_config_error); - - return compbuf_config_error; + hubbub_reg_state->det0_ctrl = REG_READ(DCHUBBUB_DET0_CTRL); + hubbub_reg_state->det1_ctrl = REG_READ(DCHUBBUB_DET1_CTRL); + hubbub_reg_state->det2_ctrl = REG_READ(DCHUBBUB_DET2_CTRL); + hubbub_reg_state->det3_ctrl = REG_READ(DCHUBBUB_DET3_CTRL); + hubbub_reg_state->compbuf_ctrl = REG_READ(DCHUBBUB_COMPBUF_CTRL); } static const struct hubbub_funcs hubbub30_funcs = { @@ -486,8 +468,7 @@ static const struct hubbub_funcs hubbub30_funcs = { .force_pstate_change_control = 
hubbub3_force_pstate_change_control, .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub3_construct(struct dcn20_hubbub *hubbub3, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h index 49a469969d36..9e14de3ccaee 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h @@ -133,10 +133,6 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub, void hubbub3_init_watermarks(struct hubbub *hubbub); -void hubbub3_get_det_sizes(struct hubbub *hubbub, - uint32_t *curr_det_sizes, - uint32_t *target_det_sizes); - -uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub); +void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state); #endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c index cdb20251a154..5a03758e3de6 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c @@ -933,8 +933,8 @@ int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config); } - - dcn21_dchvm_init(hubbub); + if (hubbub->funcs->dchvm_init) + hubbub->funcs->dchvm_init(hubbub); return NUM_VMID; } @@ -1071,8 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = { .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, .hubbub_read_state = hubbub2_read_state, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state, + .dchvm_init = dcn21_dchvm_init }; void hubbub31_construct(struct dcn20_hubbub *hubbub31, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c index 4d4ca6d77bbd..237331b35378 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c @@ -1037,8 +1037,7 @@ static const struct hubbub_funcs hubbub32_funcs = { .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, .set_request_limit = hubbub32_set_request_limit, .get_mall_en = hubbub32_get_mall_en, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub32_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c index a443722a8632..43ba399f4822 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c @@ -549,6 +549,55 @@ void hubbub35_init(struct hubbub *hubbub) memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate)); } +void dcn35_dchvm_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t riommu_active; + int i; + + //Init DCHVM block + REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); + + //Poll until RIOMMU_ACTIVE = 1 + for (i = 0; i < 100; i++) { + REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 
&riommu_active); + + if (riommu_active) + break; + else + udelay(5); + } + + if (riommu_active) { + // Disable gating and memory power requests + REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 1); + REG_UPDATE_4(DCHVM_CLK_CTRL, + HVM_DISPCLK_R_GATE_DIS, 1, + HVM_DISPCLK_G_GATE_DIS, 1, + HVM_DCFCLK_R_GATE_DIS, 1, + HVM_DCFCLK_G_GATE_DIS, 1); + + //Reflect the power status of DCHUBBUB + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); + + //Start rIOMMU prefetching + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); + + //Poll until HOSTVM_PREFETCH_DONE = 1 + REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + + //Enable memory power requests + REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 0); + // Enable dynamic clock gating + REG_UPDATE_4(DCHVM_CLK_CTRL, + HVM_DISPCLK_R_GATE_DIS, 0, + HVM_DISPCLK_G_GATE_DIS, 0, + HVM_DCFCLK_R_GATE_DIS, 0, + HVM_DCFCLK_G_GATE_DIS, 0); + hubbub->riommu_active = true; + } +} + /*static void hubbub35_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel) @@ -589,8 +638,8 @@ static const struct hubbub_funcs hubbub35_funcs = { .hubbub_read_state = hubbub2_read_state, .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, .dchubbub_init = hubbub35_init, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state, + .dchvm_init = dcn35_dchvm_init }; void hubbub35_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h index 23fecf88556c..9f65fff1bd4d 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h @@ -168,4 +168,5 @@ void dcn35_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase); void dcn35_init_crb(struct hubbub *hubbub); void hubbub35_init(struct hubbub *hubbub); +void dcn35_dchvm_init(struct hubbub *hubbub); #endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c index a36273a52880..d11afd1ce72a 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c @@ -1247,8 +1247,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = { .program_compbuf_segments = dcn401_program_compbuf_segments, .wait_for_det_update = dcn401_wait_for_det_update, .program_arbiter = dcn401_program_arbiter, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub401_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c index 9b026600b90e..6378e3fd7249 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c @@ -550,6 +550,7 @@ void hubp_reset(struct hubp *hubp) { memset(&hubp->pos, 0, sizeof(hubp->pos)); memset(&hubp->att, 0, sizeof(hubp->att)); + hubp->cursor_offload = false; } void hubp1_program_surface_config( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h index cf2eb9793008..f2571076fc50 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h @@ -105,7 +105,9 @@ SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\ SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\ SRI(HUBP_CLK_CNTL, HUBP, id),\ - SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id) + SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id),\ + SRI(HUBP_MEASURE_WIN_CTRL_DCFCLK, HUBP, id),\ + SRI(HUBP_MEASURE_WIN_CTRL_DPPCLK, HUBP, id) /* Register address initialization macro for ASICs with VM */ #define HUBP_REG_LIST_DCN_VM(id)\ @@ -251,7 +253,19 @@ uint32_t CURSOR_HOT_SPOT; \ uint32_t CURSOR_DST_OFFSET; \ uint32_t HUBP_CLK_CNTL; \ - uint32_t HUBPRET_READ_LINE_VALUE + uint32_t HUBPRET_READ_LINE_VALUE; \ + uint32_t HUBP_MEASURE_WIN_CTRL_DCFCLK; \ + uint32_t HUBP_MEASURE_WIN_CTRL_DPPCLK; \ + uint32_t HUBPRET_INTERRUPT; \ + uint32_t HUBPRET_MEM_PWR_CTRL; \ + uint32_t HUBPRET_MEM_PWR_STATUS; \ + uint32_t HUBPRET_READ_LINE_CTRL0; \ + uint32_t HUBPRET_READ_LINE_CTRL1; \ + uint32_t HUBPRET_READ_LINE0; \ + uint32_t HUBPRET_READ_LINE1; \ + uint32_t HUBPREQ_MEM_PWR_CTRL; \ + uint32_t HUBPREQ_MEM_PWR_STATUS + #define HUBP_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -688,6 +702,123 @@ struct dcn_fl_regs_st { uint32_t lut_fl_mode; uint32_t lut_fl_format; }; +struct dcn_hubp_reg_state { + uint32_t hubp_cntl; + uint32_t mall_config; + uint32_t mall_sub_vp; + uint32_t hubp_req_size_config; + uint32_t hubp_req_size_config_c; + uint32_t vmpg_config; + uint32_t addr_config; + uint32_t pri_viewport_dimension; + uint32_t pri_viewport_dimension_c; + uint32_t pri_viewport_start; + uint32_t pri_viewport_start_c; + uint32_t sec_viewport_dimension; + uint32_t sec_viewport_dimension_c; + uint32_t sec_viewport_start; + uint32_t sec_viewport_start_c; + uint32_t surface_config; + uint32_t tiling_config; + uint32_t clk_cntl; + uint32_t mall_status; + uint32_t measure_win_ctrl_dcfclk; + uint32_t measure_win_ctrl_dppclk; + + uint32_t blank_offset_0; + uint32_t blank_offset_1; + uint32_t cursor_settings; + uint32_t dcn_cur0_ttu_cntl0; + uint32_t dcn_cur0_ttu_cntl1; + uint32_t dcn_cur1_ttu_cntl0; + uint32_t dcn_cur1_ttu_cntl1; + uint32_t dcn_dmdat_vm_cntl; + uint32_t dcn_expansion_mode; + uint32_t dcn_global_ttu_cntl; + uint32_t dcn_surf0_ttu_cntl0; + uint32_t dcn_surf0_ttu_cntl1; + uint32_t dcn_surf1_ttu_cntl0; + uint32_t dcn_surf1_ttu_cntl1; + uint32_t dcn_ttu_qos_wm; + uint32_t dcn_vm_mx_l1_tlb_cntl; + uint32_t dcn_vm_system_aperture_high_addr; + uint32_t dcn_vm_system_aperture_low_addr; + uint32_t dcsurf_flip_control; + uint32_t dcsurf_flip_control2; + uint32_t dcsurf_primary_meta_surface_address; + uint32_t dcsurf_primary_meta_surface_address_c; + uint32_t dcsurf_primary_meta_surface_address_high; + uint32_t dcsurf_primary_meta_surface_address_high_c; + uint32_t dcsurf_primary_surface_address; + uint32_t dcsurf_primary_surface_address_c; + uint32_t dcsurf_primary_surface_address_high; + uint32_t dcsurf_primary_surface_address_high_c; + uint32_t dcsurf_secondary_meta_surface_address; + uint32_t dcsurf_secondary_meta_surface_address_c; + uint32_t dcsurf_secondary_meta_surface_address_high; + uint32_t dcsurf_secondary_meta_surface_address_high_c; + uint32_t dcsurf_secondary_surface_address; + uint32_t dcsurf_secondary_surface_address_c; + uint32_t dcsurf_secondary_surface_address_high; + uint32_t dcsurf_secondary_surface_address_high_c; + uint32_t dcsurf_surface_control; + uint32_t dcsurf_surface_earliest_inuse; + uint32_t dcsurf_surface_earliest_inuse_c; + uint32_t 
dcsurf_surface_earliest_inuse_high; + uint32_t dcsurf_surface_earliest_inuse_high_c; + uint32_t dcsurf_surface_flip_interrupt; + uint32_t dcsurf_surface_inuse; + uint32_t dcsurf_surface_inuse_c; + uint32_t dcsurf_surface_inuse_high; + uint32_t dcsurf_surface_inuse_high_c; + uint32_t dcsurf_surface_pitch; + uint32_t dcsurf_surface_pitch_c; + uint32_t dst_after_scaler; + uint32_t dst_dimensions; + uint32_t dst_y_delta_drq_limit; + uint32_t flip_parameters_0; + uint32_t flip_parameters_1; + uint32_t flip_parameters_2; + uint32_t flip_parameters_3; + uint32_t flip_parameters_4; + uint32_t flip_parameters_5; + uint32_t flip_parameters_6; + uint32_t hubpreq_mem_pwr_ctrl; + uint32_t hubpreq_mem_pwr_status; + uint32_t nom_parameters_0; + uint32_t nom_parameters_1; + uint32_t nom_parameters_2; + uint32_t nom_parameters_3; + uint32_t nom_parameters_4; + uint32_t nom_parameters_5; + uint32_t nom_parameters_6; + uint32_t nom_parameters_7; + uint32_t per_line_delivery; + uint32_t per_line_delivery_pre; + uint32_t prefetch_settings; + uint32_t prefetch_settings_c; + uint32_t ref_freq_to_pix_freq; + uint32_t uclk_pstate_force; + uint32_t vblank_parameters_0; + uint32_t vblank_parameters_1; + uint32_t vblank_parameters_2; + uint32_t vblank_parameters_3; + uint32_t vblank_parameters_4; + uint32_t vblank_parameters_5; + uint32_t vblank_parameters_6; + uint32_t vmid_settings_0; + + uint32_t hubpret_control; + uint32_t hubpret_interrupt; + uint32_t hubpret_mem_pwr_ctrl; + uint32_t hubpret_mem_pwr_status; + uint32_t hubpret_read_line_ctrl0; + uint32_t hubpret_read_line_ctrl1; + uint32_t hubpret_read_line_status; + uint32_t hubpret_read_line_value; + uint32_t hubpret_read_line0; + uint32_t hubpret_read_line1; +}; struct dcn_hubp_state { struct _vcs_dpi_display_dlg_regs_st dlg_attr; @@ -718,7 +849,6 @@ struct dcn_hubp_state { uint32_t hubp_cntl; uint32_t flip_control; }; - struct dcn10_hubp { struct hubp base; struct dcn_hubp_state state; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c index 91259b896e03..92288de4cc10 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c @@ -613,26 +613,28 @@ void hubp2_cursor_set_attributes( hubp->curs_attr = *attr; - REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, - CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); - REG_UPDATE(CURSOR_SURFACE_ADDRESS, - CURSOR_SURFACE_ADDRESS, attr->address.low_part); - - REG_UPDATE_2(CURSOR_SIZE, - CURSOR_WIDTH, attr->width, - CURSOR_HEIGHT, attr->height); - - REG_UPDATE_4(CURSOR_CONTROL, - CURSOR_MODE, attr->color_format, - CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, - CURSOR_PITCH, hw_pitch, - CURSOR_LINES_PER_CHUNK, lpc); - - REG_SET_2(CURSOR_SETTINGS, 0, - /* no shift of the cursor HDL schedule */ - CURSOR0_DST_Y_OFFSET, 0, - /* used to shift the cursor chunk request deadline */ - CURSOR0_CHUNK_HDL_ADJUST, 3); + if (!hubp->cursor_offload) { + REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, + CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); + REG_UPDATE(CURSOR_SURFACE_ADDRESS, + CURSOR_SURFACE_ADDRESS, attr->address.low_part); + + REG_UPDATE_2(CURSOR_SIZE, + CURSOR_WIDTH, attr->width, + CURSOR_HEIGHT, attr->height); + + REG_UPDATE_4(CURSOR_CONTROL, + CURSOR_MODE, attr->color_format, + CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, + CURSOR_PITCH, hw_pitch, + CURSOR_LINES_PER_CHUNK, lpc); + + REG_SET_2(CURSOR_SETTINGS, 0, + /* no shift of the cursor 
HDL schedule */ + CURSOR0_DST_Y_OFFSET, 0, + /* used to shift the cursor chunk request deadline */ + CURSOR0_CHUNK_HDL_ADJUST, 3); + } hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part; hubp->att.SURFACE_ADDR = attr->address.low_part; @@ -1059,23 +1061,28 @@ void hubp2_cursor_set_position( cur_en = 0; /* not visible beyond top edge*/ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) { - if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) + bool cursor_not_programmed = hubp->att.SURFACE_ADDR == 0 && hubp->att.SURFACE_ADDR_HIGH == 0; + + if (cur_en && cursor_not_programmed) hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr); - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, cur_en); + if (!hubp->cursor_offload) + REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en); } - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, pos->y); + if (!hubp->cursor_offload) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, pos->x, + CURSOR_Y_POSITION, pos->y); - REG_SET_2(CURSOR_HOT_SPOT, 0, - CURSOR_HOT_SPOT_X, pos->x_hotspot, - CURSOR_HOT_SPOT_Y, pos->y_hotspot); + REG_SET_2(CURSOR_HOT_SPOT, 0, + CURSOR_HOT_SPOT_X, pos->x_hotspot, + CURSOR_HOT_SPOT_Y, pos->y_hotspot); + + REG_SET(CURSOR_DST_OFFSET, 0, + CURSOR_DST_X_OFFSET, dst_x_offset); + } - REG_SET(CURSOR_DST_OFFSET, 0, - CURSOR_DST_X_OFFSET, dst_x_offset); /* TODO Handle surface pixel formats other than 4:4:4 */ /* Cursor Position Register Config */ hubp->pos.cur_ctl.bits.cur_enable = cur_en; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h index f325db555102..7062e6653062 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h @@ -145,7 +145,8 @@ uint32_t FLIP_PARAMETERS_2;\ uint32_t DCN_CUR1_TTU_CNTL0;\ uint32_t DCN_CUR1_TTU_CNTL1;\ - uint32_t VMID_SETTINGS_0 + uint32_t VMID_SETTINGS_0;\ + uint32_t DST_Y_DELTA_DRQ_LIMIT /*shared with dcn3.x*/ #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ @@ -176,7 +177,10 @@ uint32_t HUBP_3DLUT_CONTROL;\ uint32_t HUBP_3DLUT_DLG_PARAM;\ uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\ - uint32_t DCHUBP_MCACHEID_CONFIG + uint32_t DCHUBP_MCACHEID_CONFIG;\ + uint32_t DCHUBP_MALL_SUB_VP;\ + uint32_t DCHUBP_ADDR_CONFIG;\ + uint32_t HUBP_MALL_STATUS #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN_HUBP_REG_FIELD_BASE_LIST(type); \ diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c index e2740482e1cf..08ea0a1b9e7f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c @@ -73,8 +73,6 @@ * On any mode switch, if the new reg values are smaller than the current values, * then update the regs with the new values. 
* - * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142 - * */ void apply_DEDCN21_142_wa_for_hostvm_deadline( struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c index 556214b2227d..0cc6f4558989 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c @@ -476,6 +476,126 @@ void hubp3_read_state(struct hubp *hubp) } +void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + reg_state->hubp_cntl = REG_READ(DCHUBP_CNTL); + reg_state->mall_config = REG_READ(DCHUBP_MALL_CONFIG); + reg_state->mall_sub_vp = REG_READ(DCHUBP_MALL_SUB_VP); + reg_state->hubp_req_size_config = REG_READ(DCHUBP_REQ_SIZE_CONFIG); + reg_state->hubp_req_size_config_c = REG_READ(DCHUBP_REQ_SIZE_CONFIG_C); + reg_state->vmpg_config = REG_READ(DCHUBP_VMPG_CONFIG); + reg_state->addr_config = REG_READ(DCSURF_ADDR_CONFIG); + reg_state->pri_viewport_dimension = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION); + reg_state->pri_viewport_dimension_c = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION_C); + reg_state->pri_viewport_start = REG_READ(DCSURF_PRI_VIEWPORT_START); + reg_state->pri_viewport_start_c = REG_READ(DCSURF_PRI_VIEWPORT_START_C); + reg_state->sec_viewport_dimension = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION); + reg_state->sec_viewport_dimension_c = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION_C); + reg_state->sec_viewport_start = REG_READ(DCSURF_SEC_VIEWPORT_START); + reg_state->sec_viewport_start_c = REG_READ(DCSURF_SEC_VIEWPORT_START_C); + reg_state->surface_config = REG_READ(DCSURF_SURFACE_CONFIG); + reg_state->tiling_config = REG_READ(DCSURF_TILING_CONFIG); + reg_state->clk_cntl = REG_READ(HUBP_CLK_CNTL); + reg_state->mall_status = REG_READ(HUBP_MALL_STATUS); + reg_state->measure_win_ctrl_dcfclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DCFCLK); + reg_state->measure_win_ctrl_dppclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DPPCLK); + + reg_state->blank_offset_0 = REG_READ(BLANK_OFFSET_0); + reg_state->blank_offset_1 = REG_READ(BLANK_OFFSET_1); + reg_state->cursor_settings = REG_READ(CURSOR_SETTINGS); + reg_state->dcn_cur0_ttu_cntl0 = REG_READ(DCN_CUR0_TTU_CNTL0); + reg_state->dcn_cur0_ttu_cntl1 = REG_READ(DCN_CUR0_TTU_CNTL1); + reg_state->dcn_cur1_ttu_cntl0 = REG_READ(DCN_CUR1_TTU_CNTL0); + reg_state->dcn_cur1_ttu_cntl1 = REG_READ(DCN_CUR1_TTU_CNTL1); + reg_state->dcn_dmdat_vm_cntl = REG_READ(DCN_DMDATA_VM_CNTL); + reg_state->dcn_expansion_mode = REG_READ(DCN_EXPANSION_MODE); + reg_state->dcn_global_ttu_cntl = REG_READ(DCN_GLOBAL_TTU_CNTL); + reg_state->dcn_surf0_ttu_cntl0 = REG_READ(DCN_SURF0_TTU_CNTL0); + reg_state->dcn_surf0_ttu_cntl1 = REG_READ(DCN_SURF0_TTU_CNTL1); + reg_state->dcn_surf1_ttu_cntl0 = REG_READ(DCN_SURF1_TTU_CNTL0); + reg_state->dcn_surf1_ttu_cntl1 = REG_READ(DCN_SURF1_TTU_CNTL1); + reg_state->dcn_ttu_qos_wm = REG_READ(DCN_TTU_QOS_WM); + reg_state->dcn_vm_mx_l1_tlb_cntl = REG_READ(DCN_VM_MX_L1_TLB_CNTL); + reg_state->dcn_vm_system_aperture_high_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR); + reg_state->dcn_vm_system_aperture_low_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_LOW_ADDR); + reg_state->dcsurf_flip_control = REG_READ(DCSURF_FLIP_CONTROL); + reg_state->dcsurf_flip_control2 = REG_READ(DCSURF_FLIP_CONTROL2); + reg_state->dcsurf_primary_meta_surface_address = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS); + reg_state->dcsurf_primary_meta_surface_address_c = 
REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C); + reg_state->dcsurf_primary_meta_surface_address_high = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_primary_meta_surface_address_high_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_primary_surface_address = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS); + reg_state->dcsurf_primary_surface_address_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_C); + reg_state->dcsurf_primary_surface_address_high = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_primary_surface_address_high_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_secondary_meta_surface_address = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS); + reg_state->dcsurf_secondary_meta_surface_address_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C); + reg_state->dcsurf_secondary_meta_surface_address_high = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_secondary_meta_surface_address_high_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_secondary_surface_address = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS); + reg_state->dcsurf_secondary_surface_address_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_C); + reg_state->dcsurf_secondary_surface_address_high = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_secondary_surface_address_high_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_surface_control = REG_READ(DCSURF_SURFACE_CONTROL); + reg_state->dcsurf_surface_earliest_inuse = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE); + reg_state->dcsurf_surface_earliest_inuse_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_C); + reg_state->dcsurf_surface_earliest_inuse_high = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH); + reg_state->dcsurf_surface_earliest_inuse_high_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C); + reg_state->dcsurf_surface_flip_interrupt = REG_READ(DCSURF_SURFACE_FLIP_INTERRUPT); + reg_state->dcsurf_surface_inuse = REG_READ(DCSURF_SURFACE_INUSE); + reg_state->dcsurf_surface_inuse_c = REG_READ(DCSURF_SURFACE_INUSE_C); + reg_state->dcsurf_surface_inuse_high = REG_READ(DCSURF_SURFACE_INUSE_HIGH); + reg_state->dcsurf_surface_inuse_high_c = REG_READ(DCSURF_SURFACE_INUSE_HIGH_C); + reg_state->dcsurf_surface_pitch = REG_READ(DCSURF_SURFACE_PITCH); + reg_state->dcsurf_surface_pitch_c = REG_READ(DCSURF_SURFACE_PITCH_C); + reg_state->dst_after_scaler = REG_READ(DST_AFTER_SCALER); + reg_state->dst_dimensions = REG_READ(DST_DIMENSIONS); + reg_state->dst_y_delta_drq_limit = REG_READ(DST_Y_DELTA_DRQ_LIMIT); + reg_state->flip_parameters_0 = REG_READ(FLIP_PARAMETERS_0); + reg_state->flip_parameters_1 = REG_READ(FLIP_PARAMETERS_1); + reg_state->flip_parameters_2 = REG_READ(FLIP_PARAMETERS_2); + reg_state->flip_parameters_3 = REG_READ(FLIP_PARAMETERS_3); + reg_state->flip_parameters_4 = REG_READ(FLIP_PARAMETERS_4); + reg_state->flip_parameters_5 = REG_READ(FLIP_PARAMETERS_5); + reg_state->flip_parameters_6 = REG_READ(FLIP_PARAMETERS_6); + reg_state->hubpreq_mem_pwr_ctrl = REG_READ(HUBPREQ_MEM_PWR_CTRL); + reg_state->hubpreq_mem_pwr_status = REG_READ(HUBPREQ_MEM_PWR_STATUS); + reg_state->nom_parameters_0 = REG_READ(NOM_PARAMETERS_0); + reg_state->nom_parameters_1 = REG_READ(NOM_PARAMETERS_1); + reg_state->nom_parameters_2 = REG_READ(NOM_PARAMETERS_2); + reg_state->nom_parameters_3 = REG_READ(NOM_PARAMETERS_3); + reg_state->nom_parameters_4 = REG_READ(NOM_PARAMETERS_4); + reg_state->nom_parameters_5 = 
REG_READ(NOM_PARAMETERS_5); + reg_state->nom_parameters_6 = REG_READ(NOM_PARAMETERS_6); + reg_state->nom_parameters_7 = REG_READ(NOM_PARAMETERS_7); + reg_state->per_line_delivery = REG_READ(PER_LINE_DELIVERY); + reg_state->per_line_delivery_pre = REG_READ(PER_LINE_DELIVERY_PRE); + reg_state->prefetch_settings = REG_READ(PREFETCH_SETTINGS); + reg_state->prefetch_settings_c = REG_READ(PREFETCH_SETTINGS_C); + reg_state->ref_freq_to_pix_freq = REG_READ(REF_FREQ_TO_PIX_FREQ); + reg_state->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE); + reg_state->vblank_parameters_0 = REG_READ(VBLANK_PARAMETERS_0); + reg_state->vblank_parameters_1 = REG_READ(VBLANK_PARAMETERS_1); + reg_state->vblank_parameters_2 = REG_READ(VBLANK_PARAMETERS_2); + reg_state->vblank_parameters_3 = REG_READ(VBLANK_PARAMETERS_3); + reg_state->vblank_parameters_4 = REG_READ(VBLANK_PARAMETERS_4); + reg_state->vblank_parameters_5 = REG_READ(VBLANK_PARAMETERS_5); + reg_state->vblank_parameters_6 = REG_READ(VBLANK_PARAMETERS_6); + reg_state->vmid_settings_0 = REG_READ(VMID_SETTINGS_0); + reg_state->hubpret_control = REG_READ(HUBPRET_CONTROL); + reg_state->hubpret_interrupt = REG_READ(HUBPRET_INTERRUPT); + reg_state->hubpret_mem_pwr_ctrl = REG_READ(HUBPRET_MEM_PWR_CTRL); + reg_state->hubpret_mem_pwr_status = REG_READ(HUBPRET_MEM_PWR_STATUS); + reg_state->hubpret_read_line_ctrl0 = REG_READ(HUBPRET_READ_LINE_CTRL0); + reg_state->hubpret_read_line_ctrl1 = REG_READ(HUBPRET_READ_LINE_CTRL1); + reg_state->hubpret_read_line_status = REG_READ(HUBPRET_READ_LINE_STATUS); + reg_state->hubpret_read_line_value = REG_READ(HUBPRET_READ_LINE_VALUE); + reg_state->hubpret_read_line0 = REG_READ(HUBPRET_READ_LINE0); + reg_state->hubpret_read_line1 = REG_READ(HUBPRET_READ_LINE1); +} + void hubp3_setup( struct hubp *hubp, struct _vcs_dpi_display_dlg_regs_st *dlg_attr, @@ -505,30 +625,6 @@ void hubp3_init(struct hubp *hubp) hubp_reset(hubp); } -uint32_t hubp3_get_current_read_line(struct hubp *hubp) -{ - uint32_t read_line = 0; - struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - - REG_GET(HUBPRET_READ_LINE_VALUE, - PIPE_READ_LINE, - &read_line); - - return read_line; -} - -unsigned int hubp3_get_underflow_status(struct hubp *hubp) -{ - uint32_t hubp_underflow = 0; - struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - - REG_GET(DCHUBP_CNTL, - HUBP_UNDERFLOW_STATUS, - &hubp_underflow); - - return hubp_underflow; -} - static struct hubp_funcs dcn30_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -558,8 +654,7 @@ static struct hubp_funcs dcn30_hubp_funcs = { .hubp_soft_reset = hubp1_soft_reset, .hubp_set_flip_int = hubp1_set_flip_int, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp3_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h index 842f4eb72cc8..c767e9f4f9b3 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h @@ -296,6 +296,8 @@ void hubp3_dmdata_set_attributes( void hubp3_read_state(struct hubp *hubp); +void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state); + void hubp3_init(struct hubp *hubp); void hubp3_clear_tiling(struct hubp *hubp); diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c 
b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c index 47101847c2b7..189045f85039 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c @@ -110,9 +110,7 @@ static struct hubp_funcs dcn31_hubp_funcs = { .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state, }; bool hubp31_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c index a5f23bb2a76a..a781085b046b 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c @@ -118,29 +118,7 @@ void hubp32_cursor_set_attributes( uint32_t cursor_width = ((attr->width + 63) / 64) * 64; uint32_t cursor_height = attr->height; uint32_t cursor_size = cursor_width * cursor_height; - - hubp->curs_attr = *attr; - - REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, - CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); - REG_UPDATE(CURSOR_SURFACE_ADDRESS, - CURSOR_SURFACE_ADDRESS, attr->address.low_part); - - REG_UPDATE_2(CURSOR_SIZE, - CURSOR_WIDTH, attr->width, - CURSOR_HEIGHT, attr->height); - - REG_UPDATE_4(CURSOR_CONTROL, - CURSOR_MODE, attr->color_format, - CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, - CURSOR_PITCH, hw_pitch, - CURSOR_LINES_PER_CHUNK, lpc); - - REG_SET_2(CURSOR_SETTINGS, 0, - /* no shift of the cursor HDL schedule */ - CURSOR0_DST_Y_OFFSET, 0, - /* used to shift the cursor chunk request deadline */ - CURSOR0_CHUNK_HDL_ADJUST, 3); + bool use_mall_for_cursor; switch (attr->color_format) { case CURSOR_MODE_MONO: @@ -158,11 +136,49 @@ void hubp32_cursor_set_attributes( cursor_size *= 8; break; } + use_mall_for_cursor = cursor_size > 16384 ? 
1 : 0; + + hubp->curs_attr = *attr; - if (cursor_size > 16384) - REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true); - else - REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false); + if (!hubp->cursor_offload) { + REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, + CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); + REG_UPDATE(CURSOR_SURFACE_ADDRESS, + CURSOR_SURFACE_ADDRESS, attr->address.low_part); + + REG_UPDATE_2(CURSOR_SIZE, + CURSOR_WIDTH, attr->width, + CURSOR_HEIGHT, attr->height); + + REG_UPDATE_4(CURSOR_CONTROL, + CURSOR_MODE, attr->color_format, + CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, + CURSOR_PITCH, hw_pitch, + CURSOR_LINES_PER_CHUNK, lpc); + + REG_SET_2(CURSOR_SETTINGS, 0, + /* no shift of the cursor HDL schedule */ + CURSOR0_DST_Y_OFFSET, 0, + /* used to shift the cursor chunk request deadline */ + CURSOR0_CHUNK_HDL_ADJUST, 3); + + REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, use_mall_for_cursor); + } + hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part; + hubp->att.SURFACE_ADDR = attr->address.low_part; + hubp->att.size.bits.width = attr->width; + hubp->att.size.bits.height = attr->height; + hubp->att.cur_ctl.bits.mode = attr->color_format; + + hubp->cur_rect.w = attr->width; + hubp->cur_rect.h = attr->height; + + hubp->att.cur_ctl.bits.pitch = hw_pitch; + hubp->att.cur_ctl.bits.line_per_chunk = lpc; + hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION; + hubp->att.settings.bits.dst_y_offset = 0; + hubp->att.settings.bits.chunk_hdl_adjust = 3; + hubp->use_mall_for_cursor = use_mall_for_cursor; } void hubp32_init(struct hubp *hubp) { @@ -206,9 +222,7 @@ static struct hubp_funcs dcn32_hubp_funcs = { .hubp_update_mall_sel = hubp32_update_mall_sel, .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp32_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c index b140808f21af..79c583e258c7 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c @@ -209,6 +209,7 @@ static struct hubp_funcs dcn35_hubp_funcs = { .dmdata_load = hubp2_dmdata_load, .dmdata_status_done = hubp2_dmdata_status_done, .hubp_read_state = hubp3_read_state, + .hubp_read_reg_state = hubp3_read_reg_state, .hubp_clear_underflow = hubp2_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp35_init, @@ -218,9 +219,6 @@ static struct hubp_funcs dcn35_hubp_funcs = { .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank_value, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, }; bool hubp35_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index 0fcbc6a35be6..f01eae50d02f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -783,21 +783,23 @@ void hubp401_cursor_set_position( 
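Note on the hunk above: hubp32_cursor_set_attributes now computes use_mall_for_cursor up front so the same decision can be written to DCHUBP_MALL_CONFIG (when not offloaded) and cached on the hubp for the cursor-offload payload. A standalone sketch of that sizing math follows; only the 64-pixel width padding, the *8 case and the 16384-byte threshold are visible in the hunk, so the other per-format multipliers and the enum names are assumptions for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative cursor formats; names only approximate the DC enums. */
enum cursor_fmt { CUR_MONO, CUR_32BPP, CUR_64BIT_FP };

/* Width is padded to a multiple of 64 pixels, scaled by bytes per pixel,
 * and MALL is used for the cursor only when the surface exceeds 16 KiB. */
static int cursor_uses_mall(enum cursor_fmt fmt, uint32_t width, uint32_t height)
{
	uint32_t cursor_width = ((width + 63) / 64) * 64;
	uint32_t cursor_size = cursor_width * height;

	switch (fmt) {
	case CUR_MONO:
		cursor_size /= 2;   /* assumed packed 4 bpp */
		break;
	case CUR_32BPP:
		cursor_size *= 4;   /* assumed ARGB8888 */
		break;
	case CUR_64BIT_FP:
		cursor_size *= 8;   /* 64-bit FP, as in the hunk */
		break;
	}
	return cursor_size > 16384;
}

int main(void)
{
	/* 64x64 ARGB cursor: exactly 16384 bytes, stays out of MALL. */
	printf("%d\n", cursor_uses_mall(CUR_32BPP, 64, 64));
	/* 256x256 ARGB cursor: 262144 bytes, routed through MALL. */
	printf("%d\n", cursor_uses_mall(CUR_32BPP, 256, 256));
	return 0;
}
```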
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr); - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, cur_en); + if (!hubp->cursor_offload) + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, cur_en); } - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, x_pos, - CURSOR_Y_POSITION, y_pos); + if (!hubp->cursor_offload) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, x_pos, + CURSOR_Y_POSITION, y_pos); - REG_SET_2(CURSOR_HOT_SPOT, 0, - CURSOR_HOT_SPOT_X, pos->x_hotspot, - CURSOR_HOT_SPOT_Y, pos->y_hotspot); - - REG_SET(CURSOR_DST_OFFSET, 0, - CURSOR_DST_X_OFFSET, dst_x_offset); + REG_SET_2(CURSOR_HOT_SPOT, 0, + CURSOR_HOT_SPOT_X, pos->x_hotspot, + CURSOR_HOT_SPOT_Y, pos->y_hotspot); + REG_SET(CURSOR_DST_OFFSET, 0, + CURSOR_DST_X_OFFSET, dst_x_offset); + } /* Cursor Position Register Config */ hubp->pos.cur_ctl.bits.cur_enable = cur_en; hubp->pos.position.bits.x_pos = pos->x; @@ -1071,9 +1073,7 @@ static struct hubp_funcs dcn401_hubp_funcs = { .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done, .hubp_clear_tiling = hubp401_clear_tiling, .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp401_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h index fdabbeec8ffa..4570b8016de5 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h @@ -31,7 +31,7 @@ #include "dcn30/dcn30_hubp.h" #include "dcn31/dcn31_hubp.h" #include "dcn32/dcn32_hubp.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" #define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\ SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 24184b4eb352..8fe399939220 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -659,6 +659,20 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) } } +static void +dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc_link *link = pipe_ctx->stream->link; + struct dc_bios *bios = link->ctx->dc_bios; + struct bp_encoder_control encoder_control = {0}; + + encoder_control.action = enable ? 
ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE; + encoder_control.engine_id = link->link_enc->analog_engine; + encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10; + + bios->funcs->encoder_control(bios, &encoder_control); +} + void dce110_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = @@ -671,7 +685,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) uint32_t early_control = 0; struct timing_generator *tg = pipe_ctx->stream_res.tg; - link_hwss->setup_stream_attribute(pipe_ctx); link_hwss->setup_stream_encoder(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); @@ -689,6 +702,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) early_control = lane_count; tg->funcs->set_early_control(tg, early_control); + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + dce110_dac_encoder_control(pipe_ctx, true); } static enum bp_result link_transmitter_control( @@ -1086,6 +1102,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) if (!pipe_ctx->stream) return; + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + return; + dc = pipe_ctx->stream->ctx->dc; clk_mgr = dc->clk_mgr; link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res); @@ -1122,6 +1141,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (!pipe_ctx || !pipe_ctx->stream) return; + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + return; + dc = pipe_ctx->stream->ctx->dc; clk_mgr = dc->clk_mgr; link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res); @@ -1196,6 +1218,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + dce110_dac_encoder_control(pipe_ctx, false); } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1581,6 +1606,51 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } +static void +dce110_select_crtc_source(struct pipe_ctx *pipe_ctx) +{ + struct dc_link *link = pipe_ctx->stream->link; + struct dc_bios *bios = link->ctx->dc_bios; + struct bp_crtc_source_select crtc_source_select = {0}; + enum engine_id engine_id = link->link_enc->preferred_engine; + uint8_t bit_depth; + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + engine_id = link->link_enc->analog_engine; + + switch (pipe_ctx->stream->timing.display_color_depth) { + case COLOR_DEPTH_UNDEFINED: + bit_depth = 0; + break; + case COLOR_DEPTH_666: + bit_depth = 6; + break; + default: + case COLOR_DEPTH_888: + bit_depth = 8; + break; + case COLOR_DEPTH_101010: + bit_depth = 10; + break; + case COLOR_DEPTH_121212: + bit_depth = 12; + break; + case COLOR_DEPTH_141414: + bit_depth = 14; + break; + case COLOR_DEPTH_161616: + bit_depth = 16; + break; + } + + crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst; + crtc_source_select.bit_depth = bit_depth; + crtc_source_select.engine_id = engine_id; + crtc_source_select.sink_signal = pipe_ctx->stream->signal; + + bios->funcs->select_crtc_source(bios, &crtc_source_select); +} + enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -1600,6 +1670,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( hws->funcs.disable_stream_gating(dc, pipe_ctx); } + if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) { + dce110_select_crtc_source(pipe_ctx); + } + if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output = {0}; @@ 
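Note on dce110_dac_encoder_control above: the stream's pix_clk_100hz (pixel clock in 100 Hz units) is divided by 10 before being handed to the VBIOS encoder-control call. A tiny sketch of that unit conversion; the assumption that bp_encoder_control.pixel_clock expects kHz is mine, only the /10 factor comes from the hunk.

```c
#include <stdint.h>
#include <stdio.h>

/* pix_clk_100hz counts the pixel clock in 100 Hz units, so /10 yields kHz. */
static uint32_t pix_clk_100hz_to_khz(uint32_t pix_clk_100hz)
{
	return pix_clk_100hz / 10;
}

int main(void)
{
	/* 148.5 MHz (1080p60) is stored as 1485000 in 100 Hz units. */
	printf("%u kHz\n", pix_clk_100hz_to_khz(1485000)); /* prints 148500 */
	return 0;
}
```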
-1679,7 +1753,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.tg->funcs->set_static_screen_control( pipe_ctx->stream_res.tg, event_triggers, 2); - if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) + if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && + !dc_is_rgb_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); @@ -1913,6 +1988,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; + bool should_clean_dsc_block = true; struct dc_bios *dcb = dc->ctx->dc_bios; DC_LOGGER_INIT(); @@ -2005,9 +2081,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) power_down_all_hw_blocks(dc); /* DSC could be enabled on eDP during VBIOS post. - * To clean up dsc blocks if eDP is in link but not active. + * To clean up dsc blocks if all eDP dpms_off is true. */ - if (edp_link_with_sink && (edp_stream_num == 0)) + for (i = 0; i < edp_stream_num; i++) { + if (!edp_streams[i]->dpms_off) { + should_clean_dsc_block = false; + } + } + + if (should_clean_dsc_block) clean_up_dsc_blocks(dc); disable_vga_and_power_gate_all_controllers(dc); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index e9fe97f0c4ea..fa62e40a9858 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -2245,7 +2245,7 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) if (lock) delay_cursor_until_vupdate(dc, pipe); - if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) { + if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -3090,6 +3090,9 @@ static void dcn10_update_dchubp_dpp( } if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + if (dc->hwss.abort_cursor_offload_update) + dc->hwss.abort_cursor_offload_update(dc, pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); dc->hwss.set_cursor_position(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 9477c9f9e196..c8ff8ae85a03 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -614,6 +614,14 @@ void dcn20_dpp_pg_control( * DOMAIN11_PGFSM_PWR_STATUS, pwr_status, * 1, 1000); */ + + /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */ + if (!power_on) { + struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst]; + if (dpp5 && dpp5->funcs->dpp_force_disable_cursor) + dpp5->funcs->dpp_force_disable_cursor(dpp5); + } + break; default: BREAK_TO_DEBUGGER(); @@ -1449,7 +1457,7 @@ void dcn20_pipe_control_lock( !flip_immediate) dcn20_setup_gsl_group_as_lock(dc, pipe, false); - if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) { + if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -1793,6 +1801,9 @@ void dcn20_update_dchubp_dpp( if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || pipe_ctx->update_flags.bits.scaler || 
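Note on the dce110_enable_accelerated_mode hunk above: the DSC-cleanup condition changes from "eDP link with sink but no eDP stream" to "no eDP stream is actively driven (all dpms_off)". A minimal sketch of the new predicate with a simplified stream type; note it remains vacuously true when there are zero eDP streams, which preserves the old zero-stream behaviour.

```c
#include <stdbool.h>
#include <stdio.h>

struct edp_stream { bool dpms_off; };

/* Clean up DSC blocks left enabled by VBIOS post only if no eDP stream
 * stays active across the handoff. */
static bool should_clean_dsc_blocks(const struct edp_stream *streams, int num)
{
	bool clean = true;

	for (int i = 0; i < num; i++)
		if (!streams[i].dpms_off)
			clean = false;

	return clean;
}

int main(void)
{
	struct edp_stream none[1];
	struct edp_stream mixed[2] = { { .dpms_off = true }, { .dpms_off = false } };

	printf("%d\n", should_clean_dsc_blocks(none, 0));  /* 1: vacuously true */
	printf("%d\n", should_clean_dsc_blocks(mixed, 2)); /* 0: one stream still on */
	return 0;
}
```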
viewport_changed == true) && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + if (dc->hwss.abort_cursor_offload_update) + dc->hwss.abort_cursor_offload_update(dc, pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); dc->hwss.set_cursor_position(pipe_ctx); @@ -3052,8 +3063,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) link_enc->transmitter - TRANSMITTER_UNIPHY_A); } - link_hwss->setup_stream_attribute(pipe_ctx); - if (dc->res_pool->dccg->funcs->set_pixel_rate_div) dc->res_pool->dccg->funcs->set_pixel_rate_div( dc->res_pool->dccg, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index e47ed5571dfd..81bcadf5e57e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -53,7 +53,8 @@ #include "link_service.h" #include "dc_state_priv.h" - +#define TO_DCN_DCCG(dccg)\ + container_of(dccg, struct dcn_dccg, base) #define DC_LOGGER_INIT(logger) @@ -1235,44 +1236,47 @@ void dcn30_get_underflow_debug_data(const struct dc *dc, { struct hubbub *hubbub = dc->res_pool->hubbub; - if (tg) { - uint32_t v_blank_start = 0, v_blank_end = 0; - - out_data->otg_inst = tg->inst; - - tg->funcs->get_scanoutpos(tg, - &v_blank_start, - &v_blank_end, - &out_data->h_position, - &out_data->v_position); - - out_data->otg_frame_count = tg->funcs->get_frame_count(tg); - - out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg); + if (hubbub) { + if (hubbub->funcs->hubbub_read_reg_state) { + hubbub->funcs->hubbub_read_reg_state(hubbub, out_data->hubbub_reg_state); + } } for (int i = 0; i < MAX_PIPES; i++) { struct hubp *hubp = dc->res_pool->hubps[i]; - - if (hubp) { - if (hubp->funcs->hubp_get_underflow_status) - out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp); - - if (hubp->funcs->hubp_in_blank) - out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp); - - if (hubp->funcs->hubp_get_current_read_line) - out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp); - - if (hubp->funcs->hubp_get_det_config_error) - out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp); - } + struct dpp *dpp = dc->res_pool->dpps[i]; + struct output_pixel_processor *opp = dc->res_pool->opps[i]; + struct display_stream_compressor *dsc = dc->res_pool->dscs[i]; + struct mpc *mpc = dc->res_pool->mpc; + struct timing_generator *optc = dc->res_pool->timing_generators[i]; + struct dccg *dccg = dc->res_pool->dccg; + + if (hubp) + if (hubp->funcs->hubp_read_reg_state) + hubp->funcs->hubp_read_reg_state(hubp, out_data->hubp_reg_state[i]); + + if (dpp) + if (dpp->funcs->dpp_read_reg_state) + dpp->funcs->dpp_read_reg_state(dpp, out_data->dpp_reg_state[i]); + + if (opp) + if (opp->funcs->opp_read_reg_state) + opp->funcs->opp_read_reg_state(opp, out_data->opp_reg_state[i]); + + if (dsc) + if (dsc->funcs->dsc_read_reg_state) + dsc->funcs->dsc_read_reg_state(dsc, out_data->dsc_reg_state[i]); + + if (mpc) + if (mpc->funcs->mpc_read_reg_state) + mpc->funcs->mpc_read_reg_state(mpc, i, out_data->mpc_reg_state[i]); + + if (optc) + if (optc->funcs->optc_read_reg_state) + optc->funcs->optc_read_reg_state(optc, out_data->optc_reg_state[i]); + + if (dccg) + if (dccg->funcs->dccg_read_reg_state) + dccg->funcs->dccg_read_reg_state(dccg, out_data->dccg_reg_state[i]); } - - if (hubbub->funcs->get_det_sizes) - hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, 
out_data->target_det_sizes); - - if (hubbub->funcs->compbuf_config_error) - out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub); - } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index b822f2dffff0..d1ecdb92b072 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -710,7 +710,8 @@ bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, panel_cntl->inst, panel_cntl->pwrseq_inst); - dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); + if (backlight_level_params->control_type != BACKLIGHT_CONTROL_AMD_AUX) + dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index f925f669f2a4..4ee6ed610de0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -108,6 +108,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index f39292952702..bf19ba65d09a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1061,6 +1061,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (should_use_dto_dscclk) dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 05011061822c..7aa0f452e8f7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -364,6 +364,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); @@ -816,8 +817,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { struct dpp *dpp = pipe_ctx->plane_res.dpp; - struct dccg *dccg = dc->res_pool->dccg; - /* enable DCFCLK current DCHUB */ pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true); @@ -825,7 +824,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, /* initialize HUBP on power up */ pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp); /*make sure DPPCLK is on*/ - dccg->funcs->dccg_root_gate_disable_control(dccg, 
dpp->inst, true); dpp->funcs->dpp_dppclk_control(dpp, false, true); /* make sure OPP_PIPE_CLOCK_EN = 1 */ pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( @@ -859,7 +857,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; - struct dccg *dccg = dc->res_pool->dccg; dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx); @@ -878,7 +875,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->funcs->hubp_clk_cntl(hubp, false); dpp->funcs->dpp_dppclk_control(dpp, false, false); - dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false); hubp->power_gated = true; @@ -1592,3 +1588,141 @@ void dcn35_hardware_release(struct dc *dc) if (dc->hwss.hw_block_power_up) dc->hwss.hw_block_power_up(dc, &pg_update_state); } + +void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) +{ + if (!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + /* + * Insert a blank update to modify the write index and set pipe_mask to 0. + * + * While the DMU is interlocked with driver full pipe programming via + * the DMU HW lock, if the cursor update begins to execute after a full + * pipe programming occurs there are two possible issues: + * + * 1. Outdated cursor information is programmed, replacing the current update + * 2. The cursor update in firmware holds the cursor lock, preventing + * the current update from being latched atomically in the same frame + * as the rest of the update. + * + * This blank update, treated as a no-op, will allow the firmware to skip + * the programming. + */ + + if (dc->hwss.begin_cursor_offload_update) + dc->hwss.begin_cursor_offload_update(dc, pipe); + + if (dc->hwss.commit_cursor_offload_update) + dc->hwss.commit_cursor_offload_update(dc, pipe); +} + +void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx; + + if (pipe->plane_res.hubp) + pipe->plane_res.hubp->cursor_offload = true; + + if (pipe->plane_res.dpp) + pipe->plane_res.dpp->cursor_offload = true; +} + +void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + uint32_t stream_idx, write_idx, payload_idx; + + if (pipe->plane_res.hubp) + pipe->plane_res.hubp->cursor_offload = false; + + if (pipe->plane_res.dpp) + pipe->plane_res.dpp->cursor_offload = false; + + if (!top_pipe) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1] + 
.data.cursor_offload_v1.offload_streams[stream_idx]; + + shared_stream->last_write_idx = write_idx; + + cs->offload_streams[stream_idx].write_idx = write_idx; + cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx; +} + +void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + const struct hubp *hubp = pipe->plane_res.hubp; + const struct dpp *dpp = pipe->plane_res.dpp; + volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p; + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe || !hubp || !dpp) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30; + + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR; + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot; + p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk; + + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en; + p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000; + p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF; + p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias; + p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale; + + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset; + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust; + + cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx); +} + +void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream) +{ + dc_dmub_srv_control_cursor_offload(dc, context, stream, true); +} + +void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe) +{ + dc_dmub_srv_program_cursor_now(dc, pipe); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 
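Note on the dcn35 cursor-offload helpers above: they implement a small producer protocol against a per-stream payload ring. begin stamps write_idx_start on the next slot (write_idx + 1, taken modulo the ring size), per-pipe updates fill the slot and OR the pipe into pipe_mask, and commit publishes the slot by bumping write_idx and stamping write_idx_finish; abort is just begin followed by commit on an untouched slot, which the firmware skips because its pipe_mask stays 0. The following standalone sketch shows only that ring arithmetic; the structure names, ring size, and the explicit pipe_mask reset are invented for illustration.

```c
#include <stdint.h>
#include <stdio.h>

#define PAYLOAD_RING_SIZE 4   /* illustrative; the real size comes from DMUB */

struct payload {
	uint32_t write_idx_start;
	uint32_t write_idx_finish;
	uint32_t pipe_mask;
};

struct offload_stream {
	uint32_t write_idx;                        /* last published slot */
	struct payload payloads[PAYLOAD_RING_SIZE];
};

/* begin: claim the next slot (write_idx + 1) and start with no pipes */
static struct payload *begin_update(struct offload_stream *s)
{
	uint32_t next = s->write_idx + 1;
	struct payload *p = &s->payloads[next % PAYLOAD_RING_SIZE];

	p->write_idx_start = next;
	p->pipe_mask = 0;
	return p;
}

/* per-pipe update: mark which pipes carry valid data in this slot */
static void update_pipe(struct payload *p, unsigned int pipe_idx)
{
	p->pipe_mask |= 1u << pipe_idx;
}

/* commit: publish the slot; a slot with pipe_mask == 0 acts as a no-op */
static void commit_update(struct offload_stream *s)
{
	uint32_t next = s->write_idx + 1;

	s->payloads[next % PAYLOAD_RING_SIZE].write_idx_finish = next;
	s->write_idx = next;
}

int main(void)
{
	struct offload_stream s = { 0 };
	struct payload *p;

	/* normal update touching pipe 2 */
	p = begin_update(&s);
	update_pipe(p, 2);
	commit_update(&s);

	/* "abort": begin + commit with no pipes, leaving an empty slot */
	begin_update(&s);
	commit_update(&s);

	printf("write_idx=%u slot1_mask=0x%x slot2_mask=0x%x\n",
	       s.write_idx, s.payloads[1].pipe_mask, s.payloads[2].pipe_mask);
	return 0;
}
```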
0b1d6f608edd..1ff41dba556c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -101,4 +101,12 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx); void dcn35_hardware_release(struct dc *dc); +void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream); +void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index f2f16a0bdb4f..5a66c9db2670 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -86,6 +86,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .abort_cursor_offload_update = dcn35_abort_cursor_offload_update, + .begin_cursor_offload_update = dcn35_begin_cursor_offload_update, + .commit_cursor_offload_update = dcn35_commit_cursor_offload_update, + .update_cursor_offload_pipe = dcn35_update_cursor_offload_pipe, + .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update, + .program_cursor_offload_now = dcn35_program_cursor_offload_now, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index ce3d0b45fb4c..2fbc22afb89c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -26,9 +26,11 @@ #include "clk_mgr.h" #include "dsc.h" #include "link_service.h" +#include "custom_float.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn10/dcn10_cm_common.h" +#include "dcn10/dcn10_hubbub.h" #include "dcn20/dcn20_optc.h" #include "dcn30/dcn30_cm_common.h" #include "dcn32/dcn32_hwseq.h" @@ -36,6 +38,7 @@ #include "dcn401/dcn401_resource.h" #include "dc_state_priv.h" #include "link_enc_cfg.h" +#include "../hw_sequencer.h" #define DC_LOGGER_INIT(logger) @@ -971,8 +974,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) } } - link_hwss->setup_stream_attribute(pipe_ctx); - if (dc->res_pool->dccg->funcs->set_pixel_rate_div) { dc->res_pool->dccg->funcs->set_pixel_rate_div( dc->res_pool->dccg, @@ -1407,9 +1408,9 @@ void dcn401_prepare_bandwidth(struct dc *dc, } if (dc->debug.fams2_config.bits.enable) { - dcn401_fams2_global_control_lock(dc, context, true); + dcn401_dmub_hw_control_lock(dc, context, true); dcn401_fams2_update_config(dc, context, false); - dcn401_fams2_global_control_lock(dc, context, false); + dcn401_dmub_hw_control_lock(dc, context, false); } if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) { @@ -1428,9 +1429,9 @@ void dcn401_optimize_bandwidth( /* enable fams2 if needed */ if (dc->debug.fams2_config.bits.enable) { - 
dcn401_fams2_global_control_lock(dc, context, true); + dcn401_dmub_hw_control_lock(dc, context, true); dcn401_fams2_update_config(dc, context, true); - dcn401_fams2_global_control_lock(dc, context, false); + dcn401_dmub_hw_control_lock(dc, context, false); } /* program dchubbub watermarks */ @@ -1469,14 +1470,17 @@ void dcn401_optimize_bandwidth( } } -void dcn401_fams2_global_control_lock(struct dc *dc, +void dcn401_dmub_hw_control_lock(struct dc *dc, struct dc_state *context, bool lock) { /* use always for now */ union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; - if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable) + if (!dc->ctx || !dc->ctx->dmub_srv) + return; + + if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc)) return; hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; @@ -1486,12 +1490,12 @@ void dcn401_fams2_global_control_lock(struct dc *dc, dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); } -void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params) +void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params) { - struct dc *dc = params->fams2_global_control_lock_fast_params.dc; - bool lock = params->fams2_global_control_lock_fast_params.lock; + struct dc *dc = params->dmub_hw_control_lock_fast_params.dc; + bool lock = params->dmub_hw_control_lock_fast_params.lock; - if (params->fams2_global_control_lock_fast_params.is_required) { + if (params->dmub_hw_control_lock_fast_params.is_required) { union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; @@ -1598,6 +1602,143 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context, dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true); } +static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state) +{ + struct pipe_ctx *old_pipe; + struct pipe_ctx *new_pipe; + struct pipe_ctx *old_opp_heads[MAX_PIPES]; + struct pipe_ctx *old_otg_master; + int old_opp_head_count = 0; + int i; + + old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx]; + + if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) { + old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master, + &dc->current_state->res_ctx, + old_opp_heads); + } else { + old_otg_master = NULL; + } + + /* Process new DSC configuration if DSC is enabled */ + if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) { + struct dc_stream_state *stream = otg_master->stream; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + int last_dsc_calc = 0; + bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) && + stream->timing.pix_clk_100hz > 480000; + + /* Count ODM pipes */ + for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt; + + /* Step 1: Set DTO DSCCLK for main DSC if needed */ + if (should_use_dto_dscclk) { + hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg, + otg_master->stream_res.dsc->inst, num_slices_h); + } + + /* Step 2: Calculate and set DSC config for main DSC */ + last_dsc_calc = *seq_state->num_steps; + hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt); + + /* Step 3: Enable main DSC block */ + hwss_add_dsc_enable_with_opp(seq_state, otg_master); + + /* Step 4: Configure and enable ODM 
DSC blocks */ + for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + if (!odm_pipe->stream_res.dsc) + continue; + + /* Set DTO DSCCLK for ODM DSC if needed */ + if (should_use_dto_dscclk) { + hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg, + odm_pipe->stream_res.dsc->inst, num_slices_h); + } + + /* Calculate and set DSC config for ODM DSC */ + last_dsc_calc = *seq_state->num_steps; + hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt); + + /* Enable ODM DSC block */ + hwss_add_dsc_enable_with_opp(seq_state, odm_pipe); + } + + /* Step 5: Configure DSC in timing generator */ + hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, + &seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true); + } else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) { + /* Disable DSC in OPTC */ + hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false); + + hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc); + } + + /* Disable DSC for old pipes that no longer need it */ + if (old_otg_master && old_otg_master->stream_res.dsc) { + for (i = 0; i < old_opp_head_count; i++) { + old_pipe = old_opp_heads[i]; + new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx]; + + /* If old pipe had DSC but new pipe doesn't, disable the old DSC */ + if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) { + /* Then disconnect DSC block */ + hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc); + } + } + } +} + +void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state) +{ + struct pipe_ctx *opp_heads[MAX_PIPES]; + int opp_inst[MAX_PIPES] = {0}; + int opp_head_count; + int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false); + int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true); + int i; + + opp_head_count = resource_get_opp_heads_for_otg_master( + otg_master, &context->res_ctx, opp_heads); + + for (i = 0; i < opp_head_count; i++) + opp_inst[i] = opp_heads[i]->stream_res.opp->inst; + + /* Add ODM combine/bypass operation to sequence */ + if (opp_head_count > 1) { + hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst, + opp_head_count, odm_slice_width, last_odm_slice_width); + } else { + hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing); + } + + /* Add OPP operations to sequence */ + for (i = 0; i < opp_head_count; i++) { + /* Add OPP pipe clock control operation */ + hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true); + + /* Add OPP program left edge extra pixel operation */ + hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp, + opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER)); + } + + /* Add DSC update operations to sequence */ + dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state); + + /* Add blank pixel data operation if needed */ + if (!resource_is_pipe_type(otg_master, DPP_PIPE)) { + if (dc->hwseq->funcs.blank_pixel_data_sequence) + dc->hwseq->funcs.blank_pixel_data_sequence( + dc, otg_master, true, seq_state); + } +} + void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { @@ -2086,6 +2227,157 @@ void dcn401_program_pipe( } } +/* + * dcn401_program_pipe_sequence - 
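Note on dcn401_add_dsc_sequence_for_odm_change above: the stream's horizontal DSC slice count is split evenly across the ODM OPP heads, so each DSC instance (and each DTO DSCCLK programming step) gets num_slices_h / opp_cnt. The per-DCN update_dsc_on_stream hunks earlier assert that this division is exact. A minimal sketch of that split with the divisibility check made explicit:

```c
#include <stdio.h>

/* Each ODM segment drives one OPP/DSC pair, so the stream's horizontal
 * slice count must divide evenly across the opp_cnt segments. */
static int dsc_slices_per_opp(int num_slices_h, int opp_cnt)
{
	if (opp_cnt <= 0 || num_slices_h % opp_cnt != 0)
		return -1;  /* invalid ODM/DSC combination */

	return num_slices_h / opp_cnt;
}

int main(void)
{
	printf("%d\n", dsc_slices_per_opp(8, 2));  /* 4 slices per DSC with 2:1 ODM */
	printf("%d\n", dsc_slices_per_opp(6, 4));  /* -1: not evenly divisible */
	return 0;
}
```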
Sequence-based version of dcn401_program_pipe + * + * This function creates a sequence-based version of the original dcn401_program_pipe + * function. Instead of directly calling hardware programming functions, it appends + * sequence steps to the provided block_sequence array that can later be executed + * as part of hwss_execute_sequence. + * + */ +void dcn401_program_pipe_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + + /* Only need to unblank on top pipe */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) { + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.odm || + pipe_ctx->stream->update_flags.bits.abm_level) { + if (dc->hwseq->funcs.blank_pixel_data_sequence) + dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx, + !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible, + seq_state); + } + } + + /* Only update TG on top pipe */ + if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe + && !pipe_ctx->prev_odm_pipe) { + + /* Step 1: Program global sync */ + hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg, + dcn401_calculate_vready_offset_for_group(pipe_ctx), + (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines, + (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels, + (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels, + (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines); + + /* Step 2: Wait for VACTIVE state (if not phantom pipe) */ + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + + /* Step 3: Set VTG params */ + hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); + + /* Step 4: Setup vupdate interrupt (if available) */ + if (hws->funcs.setup_vupdate_interrupt) + dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state); + } + + if (pipe_ctx->update_flags.bits.odm) { + if (hws->funcs.update_odm_sequence) + hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state); + } + + if (pipe_ctx->update_flags.bits.enable) { + if (dc->hwss.enable_plane_sequence) + dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state); + } + + if (pipe_ctx->update_flags.bits.det_size) { + if (dc->res_pool->hubbub->funcs->program_det_size) { + hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub, + pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); + } + + if (dc->res_pool->hubbub->funcs->program_det_segments) { + hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub, + pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); + } + } + + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || + pipe_ctx->plane_state->update_flags.raw || + pipe_ctx->stream->update_flags.raw)) { + + if (dc->hwss.update_dchubp_dpp_sequence) + dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state); + } + + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || + pipe_ctx->plane_state->update_flags.bits.hdr_mult)) { + + hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state); + } + + if (pipe_ctx->plane_state && + (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change || + pipe_ctx->plane_state->update_flags.bits.lut_3d || + pipe_ctx->update_flags.bits.enable)) { + + 
hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state); + } + + /* dcn10_translate_regamma_to_hw_format takes 750us to finish + * only do gamma programming for powering on, internal memcmp to avoid + * updating on slave planes + */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->stream->update_flags.bits.out_tf) { + hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream); + } + + /* If the pipe has been enabled or has a different opp, we + * should reprogram the fmt. This deals with cases where + * interation between mpc and odm combine on different streams + * causes a different pipe to be chosen to odm combine with. + */ + if (pipe_ctx->update_flags.bits.enable + || pipe_ctx->update_flags.bits.opp_changed) { + + hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601, + pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal); + + hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp, + &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping); + } + + /* Set ABM pipe after other pipe configurations done */ + if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) { + if (pipe_ctx->stream_res.abm) { + hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx); + + hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level); + } + } + + if (pipe_ctx->update_flags.bits.test_pattern_changed) { + struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp; + + hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_opp, + pipe_ctx->stream_res.test_pattern_params.test_pattern, + pipe_ctx->stream_res.test_pattern_params.color_space, + pipe_ctx->stream_res.test_pattern_params.color_depth, + (struct tg_color){0}, + false, + pipe_ctx->stream_res.test_pattern_params.width, + pipe_ctx->stream_res.test_pattern_params.height, + pipe_ctx->stream_res.test_pattern_params.offset); + } + +} + void dcn401_program_front_end_for_ctx( struct dc *dc, struct dc_state *context) @@ -2163,7 +2455,6 @@ void dcn401_program_front_end_for_ctx( && context->res_ctx.pipe_ctx[i].stream) hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); - /* Disconnect mpcc */ for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable @@ -2242,11 +2533,11 @@ void dcn401_program_front_end_for_ctx( /* Avoid underflow by check of pipe line read when adding 2nd plane. */ if (hws->wa.wait_hubpret_read_start_during_mpo_transition && - !pipe->top_pipe && - pipe->stream && - pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && - dc->current_state->stream_status[0].plane_count == 1 && - context->stream_status[0].plane_count > 1) { + !pipe->top_pipe && + pipe->stream && + pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && + dc->current_state->stream_status[0].plane_count == 1 && + context->stream_status[0].plane_count > 1) { pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); } } @@ -2358,7 +2649,6 @@ void dcn401_post_unlock_program_front_end( */ if (hwseq->funcs.update_force_pstate) dc->hwseq->funcs.update_force_pstate(dc, context); - /* Only program the MALL registers after all the main and phantom pipes * are done programming. 
*/ @@ -2672,3 +2962,1084 @@ void dcn401_plane_atomic_power_down(struct dc *dc, if (hws->funcs.dpp_root_clock_control) hws->funcs.dpp_root_clock_control(hws, dpp->inst, false); } + +void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + const struct hubp *hubp = pipe->plane_res.hubp; + const struct dpp *dpp = pipe->plane_res.dpp; + volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p; + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe || !hubp || !dpp) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401; + + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR; + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot; + p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk; + + p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en; + p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000; + p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF; + + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y = + dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y = + dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB = + dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB = + dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb; + + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset; + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust; + p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor; + + cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx); +} + +void dcn401_plane_atomic_power_down_sequence(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + 
uint32_t org_ip_request_cntl = 0; + + DC_LOGGER_INIT(dc->ctx->logger); + + /* Check and set DC_IP_REQUEST_CNTL if needed */ + if (REG(DC_IP_REQUEST_CNTL)) { + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + hwss_add_dc_ip_request_cntl(seq_state, dc, true); + } + + /* DPP power gating control */ + hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false); + + /* HUBP power gating control */ + hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false); + + /* HUBP reset */ + hwss_add_hubp_reset(seq_state, hubp); + + /* DPP reset */ + hwss_add_dpp_reset(seq_state, dpp); + + /* Restore DC_IP_REQUEST_CNTL if it was originally 0 */ + if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL)) + hwss_add_dc_ip_request_cntl(seq_state, dc, false); + + DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst); + + /* DPP root clock control */ + hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false); +} + +/* trigger HW to start disconnect plane from stream on the next vsync using block sequence */ +void dcn401_plane_atomic_disconnect_sequence(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + int dpp_id = pipe_ctx->plane_res.dpp->inst; + struct mpc *mpc = dc->res_pool->mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc *mpcc_to_remove = NULL; + struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; + + mpc_tree_params = &(opp->mpc_tree_params); + mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id); + + /*Already reset*/ + if (mpcc_to_remove == NULL) + return; + + /* Step 1: Remove MPCC from MPC tree */ + hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove); + + // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle, + // so don't wait for MPCC_IDLE in the programming sequence + if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) { + /* Step 2: Set MPCC disconnect pending flag */ + hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true); + } + + /* Step 3: Set optimized required flag */ + hwss_add_dc_set_optimized_required(seq_state, dc, true); + + /* Step 4: Disconnect HUBP if function exists */ + if (hubp->funcs->hubp_disconnect) + hwss_add_hubp_disconnect(seq_state, hubp); + + /* Step 5: Verify pstate change high if debug sanity checks are enabled */ + if (dc->debug.sanity_checks) + dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); +} + +void dcn401_blank_pixel_data_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state) +{ + struct tg_color black_color = {0}; + struct stream_resource *stream_res = &pipe_ctx->stream_res; + struct dc_stream_state *stream = pipe_ctx->stream; + enum dc_color_space color_space = stream->output_color_space; + enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; + enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; + struct pipe_ctx *odm_pipe; + struct rect odm_slice_src; + + if (stream->link->test_pattern_enabled) + return; + + /* get opp dpg blank color */ + color_space_to_black_color(dc, color_space, &black_color); + + if (blank) { + /* Set ABM immediate disable */ + hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx); + + if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { + test_pattern = 
CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; + test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + } + } else { + test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; + } + + odm_pipe = pipe_ctx; + + /* Set display pattern generator for all ODM pipes */ + while (odm_pipe->next_odm_pipe) { + odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_pipe->stream_res.opp, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + black_color, + true, + odm_slice_src.width, + odm_slice_src.height, + odm_slice_src.x); + + odm_pipe = odm_pipe->next_odm_pipe; + } + + /* Set display pattern generator for final ODM pipe */ + odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_pipe->stream_res.opp, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + black_color, + true, + odm_slice_src.width, + odm_slice_src.height, + odm_slice_src.x); + + /* Handle ABM level setting when not blanking */ + if (!blank) { + if (stream_res->abm) { + /* Set pipe for ABM */ + hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx); + + /* Set ABM level */ + hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level); + } + } +} + +void dcn401_program_all_writeback_pipes_in_tree_sequence( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + int i_wb, i_pipe; + + if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb) + return; + + /* For each writeback pipe */ + for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) { + /* Get direct pointer to writeback info */ + struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb]; + int mpcc_inst = -1; + + if (wb_info->wb_enabled) { + /* Get the MPCC instance for writeback_source_plane */ + for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe]; + + if (!pipe_ctx->plane_state) + continue; + + if (pipe_ctx->plane_state == wb_info->writeback_source_plane) { + mpcc_inst = pipe_ctx->plane_res.mpcc_inst; + break; + } + } + + if (mpcc_inst == -1) { + /* Disable writeback pipe and disconnect from MPCC + * if source plane has been removed + */ + dcn401_disable_writeback_sequence(dc, wb_info, seq_state); + continue; + } + + ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb); + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + + if (dwb->funcs->is_enabled(dwb)) { + /* Writeback pipe already enabled, only need to update */ + dcn401_update_writeback_sequence(dc, wb_info, context, seq_state); + } else { + /* Enable writeback pipe and connect to MPCC */ + dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state); + } + } else { + /* Disable writeback pipe and disconnect from MPCC */ + dcn401_disable_writeback_sequence(dc, wb_info, seq_state); + } + } +} + +void dcn401_enable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + int mpcc_inst, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Update DWBC with new parameters */ + 
hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params); + + /* Configure MCIF_WB buffer settings */ + hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); + + /* Configure MCIF_WB arbitration */ + hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); + + /* Enable MCIF_WB */ + hwss_add_mcif_wb_enable(seq_state, mcif_wb); + + /* Set DWB MUX to connect writeback to MPCC */ + hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst); + + /* Enable DWBC */ + hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params); +} + +void dcn401_disable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Disable DWBC */ + hwss_add_dwbc_disable(seq_state, dwb); + + /* Disable DWB MUX */ + hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst); + + /* Disable MCIF_WB */ + hwss_add_mcif_wb_disable(seq_state, mcif_wb); +} + +void dcn401_update_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Update writeback pipe */ + hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params); + + /* Update MCIF_WB buffer settings if needed */ + hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); +} + +static int find_free_gsl_group(const struct dc *dc) +{ + if (dc->res_pool->gsl_groups.gsl_0 == 0) + return 1; + if (dc->res_pool->gsl_groups.gsl_1 == 0) + return 2; + if (dc->res_pool->gsl_groups.gsl_2 == 0) + return 3; + + return 0; +} + +void dcn401_setup_gsl_group_as_lock_sequence( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable, + struct block_sequence_state *seq_state) +{ + struct gsl_params gsl; + int group_idx; + + memset(&gsl, 0, sizeof(struct gsl_params)); + + if (enable) { + /* return if group already assigned since GSL was set up + * for vsync flip, we would unassign so it can't be "left over" + */ + if (pipe_ctx->stream_res.gsl_group > 0) + return; + + group_idx = find_free_gsl_group(dc); + ASSERT(group_idx != 0); + pipe_ctx->stream_res.gsl_group = group_idx; + + /* set gsl group reg field and mark resource used */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 1; + dc->res_pool->gsl_groups.gsl_0 = 1; + break; + case 2: + gsl.gsl1_en = 1; + dc->res_pool->gsl_groups.gsl_1 = 1; + break; + case 3: + gsl.gsl2_en = 1; + dc->res_pool->gsl_groups.gsl_2 = 1; + break; + default: + BREAK_TO_DEBUGGER(); + return; // invalid case + } + gsl.gsl_master_en = 1; + } else { + group_idx = pipe_ctx->stream_res.gsl_group; + if (group_idx == 0) + return; // if not in use, just return + + pipe_ctx->stream_res.gsl_group = 0; + + /* unset gsl group reg field and mark resource free */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 0; + dc->res_pool->gsl_groups.gsl_0 = 0; + break; + case 2: + gsl.gsl1_en = 0; + 
dc->res_pool->gsl_groups.gsl_1 = 0; + break; + case 3: + gsl.gsl2_en = 0; + dc->res_pool->gsl_groups.gsl_2 = 0; + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + gsl.gsl_master_en = 0; + } + + hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl); + hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); +} + +void dcn401_disable_plane_sequence( + struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM; + struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; + + if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) + return; + + /* Wait for MPCC disconnect */ + if (dc->hwss.wait_for_mpcc_disconnect_sequence) + dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state); + + /* In flip immediate with pipe splitting case GSL is used for synchronization + * so we must disable it when the plane is disabled. + */ + if (pipe_ctx->stream_res.gsl_group != 0) + dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state); + + /* Update HUBP mall sel */ + if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel) + hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false); + + /* Set flip control GSL */ + hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false); + + /* HUBP clock control */ + hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false); + + /* DPP clock control */ + hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false); + + /* Plane atomic power down */ + if (dc->hwseq->funcs.plane_atomic_power_down_sequence) + dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp, + pipe_ctx->plane_res.hubp, seq_state); + + pipe_ctx->stream = NULL; + memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res)); + memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res)); + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->plane_state = NULL; + + /* Turn back off the phantom OTG after the phantom plane is fully disabled */ + if (is_phantom && tg && tg->funcs->disable_phantom_crtc) + hwss_add_disable_phantom_crtc(seq_state, tg); +} + +void dcn401_post_unlock_reset_opp_sequence( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state) +{ + struct display_stream_compressor *dsc = opp_head->stream_res.dsc; + struct dccg *dccg = dc->res_pool->dccg; + + /* Wait for all DPP pipes in current mpc blending tree completes double + * buffered disconnection before resetting OPP + */ + if (dc->hwss.wait_for_mpcc_disconnect_sequence) + dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state); + + if (dsc) { + bool *is_ungated = NULL; + /* Check DSC power gate status */ + if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status) + hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false); + + /* Seamless update specific where we will postpone non + * double buffered DSCCLK disable logic in post unlock + * sequence after DSC is disconnected from OPP but not + * yet power gated. 
+ */ + + /* DSC wait disconnect pending clear */ + hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated); + + /* DSC disable */ + hwss_add_dsc_disable(seq_state, dsc, is_ungated); + + /* Set reference DSCCLK */ + if (dccg && dccg->funcs->set_ref_dscclk) + hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0); + } +} + +void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable) +{ + struct dce_hwseq *hws = dc->hwseq; + + if (REG(DC_IP_REQUEST_CNTL)) + REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0); +} + +void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + uint32_t org_ip_request_cntl = 0; + + if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp) + return; + + if (REG(DC_IP_REQUEST_CNTL)) + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + + /* Step 1: DPP root clock control - enable clock */ + if (hws->funcs.dpp_root_clock_control) + hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true); + + /* Step 2: Enable DC IP request (if needed) */ + if (hws->funcs.dc_ip_request_cntl) + hwss_add_dc_ip_request_cntl(seq_state, dc, true); + + /* Step 3: DPP power gating control - power on */ + if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control) + hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true); + + /* Step 4: HUBP power gating control - power on */ + if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control) + hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true); + + /* Step 5: Disable DC IP request (restore state) */ + if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl) + hwss_add_dc_ip_request_cntl(seq_state, dc, false); + + /* Step 6: HUBP clock control - enable DCFCLK */ + if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl) + hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true); + + /* Step 7: HUBP initialization */ + if (pipe_ctx->plane_res.hubp->funcs->hubp_init) + hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp); + + /* Step 8: OPP pipe clock control - enable */ + if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control) + hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true); + + /* Step 9: VM system aperture settings */ + if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) { + hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0, + dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr); + } + + /* Step 10: Flip interrupt setup */ + if (!pipe_ctx->top_pipe + && pipe_ctx->plane_state + && pipe_ctx->plane_state->flip_int_enabled + && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) { + hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp); + } +} + +void dcn401_update_dchubp_dpp_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct dccg *dccg = dc->res_pool->dccg; + bool viewport_changed = false; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx); + + if (!hubp || !dpp || !plane_state) + return; + + /* 
Step 1: DPP DPPCLK control */ + if (pipe_ctx->update_flags.bits.dppclk) + hwss_add_dpp_dppclk_control(seq_state, dpp, false, true); + + /* Step 2: DCCG update DPP DTO */ + if (pipe_ctx->update_flags.bits.enable) + hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz); + + /* Step 3: HUBP VTG selection */ + if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { + hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst); + + /* Step 4: HUBP setup (choose setup2 or setup) */ + if (hubp->funcs->hubp_setup2) { + hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs, + &pipe_ctx->global_sync, &pipe_ctx->stream->timing); + } else if (hubp->funcs->hubp_setup) { + hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param); + } + } + + /* Step 5: Set unbounded requesting */ + if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting) + hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req); + + /* Step 6: HUBP interdependent setup */ + if (pipe_ctx->update_flags.bits.hubp_interdependent) { + if (hubp->funcs->hubp_setup_interdependent2) + hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs); + else if (hubp->funcs->hubp_setup_interdependent) + hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs); + } + + /* Step 7: DPP setup - input CSC and format setup */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.input_csc_change || + plane_state->update_flags.bits.color_space_change || + plane_state->update_flags.bits.coeff_reduction_change) { + hwss_add_dpp_setup_dpp(seq_state, pipe_ctx); + + /* Step 8: DPP cursor matrix setup */ + if (dpp->funcs->set_cursor_matrix) { + hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space, + &plane_state->cursor_csc_color_matrix); + } + + /* Step 9: DPP program bias and scale */ + if (dpp->funcs->dpp_program_bias_and_scale) + hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx); + } + + /* Step 10: MPCC updates */ + if (pipe_ctx->update_flags.bits.mpcc || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.global_alpha_change || + plane_state->update_flags.bits.per_pixel_alpha_change) { + + /* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */ + if (hws->funcs.update_mpcc_sequence) + hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state); + } + + /* Step 11: DPP scaler setup */ + if (pipe_ctx->update_flags.bits.scaler || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.per_pixel_alpha_change || + pipe_ctx->stream->update_flags.bits.scaling) { + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; + ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP); + hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); + } + + /* Step 12: HUBP viewport programming */ + if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.position_change) || + (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && 
pipe_ctx->stream->update_flags.bits.scaling)) { + hwss_add_hubp_mem_program_viewport(seq_state, hubp, + &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c); + viewport_changed = true; + } + + /* Step 13: HUBP program mcache if available */ + if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate) + hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs); + + /* Step 14: Cursor attribute setup */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || + pipe_ctx->update_flags.bits.scaler || viewport_changed == true) && + pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + + hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx); + + hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx); + + /* Step 15: Cursor position setup */ + hwss_add_set_cursor_position(seq_state, dc, pipe_ctx); + + /* Step 16: Cursor SDR white level */ + if (dc->hwss.set_cursor_sdr_white_level) + hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx); + } + + /* Step 17: Gamut remap and output CSC */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->stream->update_flags.bits.gamut_remap || + plane_state->update_flags.bits.gamut_remap_change || + pipe_ctx->stream->update_flags.bits.out_csc) { + + /* Gamut remap */ + hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx); + + /* Output CSC */ + hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space, + pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id); + } + + /* Step 18: HUBP surface configuration */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->update_flags.bits.opp_changed || + plane_state->update_flags.bits.pixel_format_change || + plane_state->update_flags.bits.horizontal_mirror_change || + plane_state->update_flags.bits.rotation_change || + plane_state->update_flags.bits.swizzle_change || + plane_state->update_flags.bits.dcc_change || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.plane_size_change) { + struct plane_size size = plane_state->plane_size; + + size.surface_size = pipe_ctx->plane_res.scl_data.viewport; + hwss_add_hubp_program_surface_config(seq_state, hubp, + plane_state->format, &plane_state->tiling_info, size, + plane_state->rotation, &plane_state->dcc, + plane_state->horizontal_mirror, 0); + hubp->power_gated = false; + } + + /* Step 19: Update plane address (with SubVP support) */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.addr_update) { + + /* SubVP save surface address if needed */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) { + hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv, + &pipe_ctx->plane_state->address, pipe_ctx->subvp_index); + } + + /* Update plane address */ + hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx); + } + + /* Step 20: HUBP set blank - enable plane */ + if (pipe_ctx->update_flags.bits.enable) + hwss_add_hubp_set_blank(seq_state, hubp, false); + + /* Step 21: Phantom HUBP post enable */ + if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable) + hwss_add_phantom_hubp_post_enable(seq_state, hubp); +} + +void dcn401_update_mpcc_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + 
struct block_sequence_state *seq_state) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct mpcc_blnd_cfg blnd_cfg = {0}; + bool per_pixel_alpha; + int mpcc_id; + struct mpcc *new_mpcc; + struct mpc *mpc = dc->res_pool->mpc; + struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); + + if (!hubp || !pipe_ctx->plane_state) + return; + + per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; + + /* Initialize blend configuration */ + blnd_cfg.overlap_only = false; + blnd_cfg.global_gain = 0xff; + + if (per_pixel_alpha) { + blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha; + if (pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } + } else { + blnd_cfg.pre_multiplied_alpha = false; + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + + if (pipe_ctx->plane_state->global_alpha) + blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; + else + blnd_cfg.global_alpha = 0xff; + + blnd_cfg.background_color_bpc = 4; + blnd_cfg.bottom_gain_mode = 0; + blnd_cfg.top_gain = 0x1f000; + blnd_cfg.bottom_inside_gain = 0x1f000; + blnd_cfg.bottom_outside_gain = 0x1f000; + + if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA) + blnd_cfg.pre_multiplied_alpha = false; + + /* MPCC instance is equal to HUBP instance */ + mpcc_id = hubp->inst; + + /* Step 1: Update blending if no full update needed */ + if (!pipe_ctx->plane_state->update_flags.bits.full_update && + !pipe_ctx->update_flags.bits.mpcc) { + + /* Update blending configuration */ + hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id); + + /* Update visual confirm color */ + hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id); + return; + } + + /* Step 2: Get existing MPCC for DPP */ + new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); + + /* Step 3: Remove MPCC if being used */ + if (new_mpcc != NULL) { + hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc); + } else { + /* Step 4: Assert MPCC idle (debug only) */ + if (dc->debug.sanity_checks) + hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id); + } + + /* Step 5: Insert new plane into MPC tree */ + hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id); + + /* Step 6: Update visual confirm color */ + hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id); + + /* Step 7: Set HUBP OPP and MPCC IDs */ + hubp->opp_id = pipe_ctx->stream_res.opp->inst; + hubp->mpcc_id = mpcc_id; +} + +static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst) +{ + int i; + + for (i = 0; i < res_pool->pipe_count; i++) { + if (res_pool->hubps[i]->inst == mpcc_inst) + return res_pool->hubps[i]; + } + ASSERT(false); + return NULL; +} + +void dcn401_wait_for_mpcc_disconnect_sequence( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + int mpcc_inst; + + if (dc->debug.sanity_checks) + dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); + + if (!pipe_ctx->stream_res.opp) + return; + + for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { + if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { + struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); 
+ + if (pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) { + hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst); + } + pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; + if (hubp) + hwss_add_hubp_set_blank(seq_state, hubp, true); + } + } + + if (dc->debug.sanity_checks) + dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); +} + +void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct timing_generator *tg = pipe_ctx->stream_res.tg; + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); + + if (start_line < 0) + start_line = 0; + + if (tg->funcs->setup_vertical_interrupt2) + hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line); +} + +void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; + uint32_t hw_mult = 0x1f000; // 1.0 default multiplier + struct custom_float_format fmt; + + fmt.exponenta_bits = 6; + fmt.mantissa_bits = 12; + fmt.sign = true; + + if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0 + convert_to_custom_float_format(multiplier, &fmt, &hw_mult); + + hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult); +} + +void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state) +{ + int i; + unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context); + bool cache_cursor = false; + + // Don't force p-state disallow -- can't block dummy p-state + + // Update MALL_SEL register for each pipe (break down update_mall_sel call) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct hubp *hubp = pipe->plane_res.hubp; + + if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { + int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; + + switch (hubp->curs_attr.color_format) { + case CURSOR_MODE_MONO: + cursor_size /= 2; + break; + case CURSOR_MODE_COLOR_1BIT_AND: + case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: + case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: + cursor_size *= 4; + break; + + case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: + case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: + default: + cursor_size *= 8; + break; + } + + if (cursor_size > 16384) + cache_cursor = true; + + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false); + } else { + // MALL not supported with Stereo3D + uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways && + pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED && + pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO && + !pipe->plane_state->address.tmz_surface) ? 
2 : 0; + hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor); + } + } + } + + // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct hubp *hubp = pipe->plane_res.hubp; + + if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) + hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true); + } + } +} + +void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc, + struct block_sequence_state *seq_state) +{ + struct hubbub *hubbub = dc->res_pool->hubbub; + + if (!hubbub->funcs->verify_allow_pstate_change_high) + return; + + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { + /* Attempt hardware workaround force recovery */ + dcn401_hw_wa_force_recovery_sequence(dc, seq_state); + } +} + +bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc, + struct block_sequence_state *seq_state) +{ + struct hubp *hubp; + unsigned int i; + + if (!dc->debug.recovery_enabled) + return false; + + /* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->set_hubp_blank_en) + hwss_add_hubp_set_blank_en(seq_state, hubp, true); + } + } + + /* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */ + hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true); + + /* Step 3: Set HUBP_DISABLE=1 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->hubp_disable_control) + hwss_add_hubp_disable_control(seq_state, hubp, true); + } + } + + /* Step 4: Set HUBP_DISABLE=0 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->hubp_disable_control) + hwss_add_hubp_disable_control(seq_state, hubp, false); + } + } + + /* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */ + hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false); + + /* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->set_hubp_blank_en) + hwss_add_hubp_set_blank_en(seq_state, hubp, false); + } + } + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index 2621b7725267..f78162ab859b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -9,6 +9,7 @@ #include "dc.h" #include "dc_stream.h" #include "hw_sequencer_private.h" +#include "hwss/hw_sequencer.h" #include "dcn401/dcn401_dccg.h" struct dc; @@ -73,15 +74,17 @@ void dcn401_optimize_bandwidth( struct dc *dc, struct dc_state *context); -void dcn401_fams2_global_control_lock(struct dc *dc, +void dcn401_dmub_hw_control_lock(struct dc *dc, 
struct dc_state *context, bool lock); void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable); -void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params); +void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params); void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); void dcn401_hardware_release(struct dc *dc); void dcn401_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); +void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state); void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy); void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); @@ -97,6 +100,11 @@ void dcn401_program_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); +void dcn401_program_pipe_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx); void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context); void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context); @@ -109,5 +117,97 @@ void dcn401_detect_pipe_changes( void dcn401_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp); +void dcn401_plane_atomic_power_down_sequence(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state *seq_state); +void dcn401_plane_atomic_disconnect_sequence(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); +void dcn401_blank_pixel_data_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state); void dcn401_initialize_min_clocks(struct dc *dc); +void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe); + +void dcn401_program_all_writeback_pipes_in_tree_sequence( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_enable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + int mpcc_inst, + struct block_sequence_state *seq_state); + +void dcn401_disable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct block_sequence_state *seq_state); + +void dcn401_update_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_setup_gsl_group_as_lock_sequence( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable, + struct block_sequence_state *seq_state); + +void dcn401_disable_plane_sequence( + struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_post_unlock_reset_opp_sequence( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state); + +void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable); + +void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); 
+ +void dcn401_update_dchubp_dpp_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_update_mpcc_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_wait_for_mpcc_disconnect_sequence( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc, + struct block_sequence_state *seq_state); + +bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc, + struct block_sequence_state *seq_state); + #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index d6e11b7e4fce..162096ce0bdf 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -9,6 +9,7 @@ #include "dcn30/dcn30_hwseq.h" #include "dcn31/dcn31_hwseq.h" #include "dcn32/dcn32_hwseq.h" +#include "dcn35/dcn35_hwseq.h" #include "dcn401/dcn401_hwseq.h" #include "dcn401_init.h" @@ -38,6 +39,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_plane_sequence = dcn401_disable_plane_sequence, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn401_interdependent_update_lock, .cursor_lock = dcn10_cursor_lock, @@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .wait_for_mpcc_disconnect_sequence = dcn401_wait_for_mpcc_disconnect_sequence, .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, @@ -60,6 +63,12 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .set_cursor_position = dcn401_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .abort_cursor_offload_update = dcn35_abort_cursor_offload_update, + .begin_cursor_offload_update = dcn35_begin_cursor_offload_update, + .commit_cursor_offload_update = dcn35_commit_cursor_offload_update, + .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe, + .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update, + .program_cursor_offload_now = dcn35_program_cursor_offload_now, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, @@ -95,55 +104,70 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, .wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, - 
.fams2_global_control_lock = dcn401_fams2_global_control_lock, + .dmub_hw_control_lock = dcn401_dmub_hw_control_lock, .fams2_update_config = dcn401_fams2_update_config, - .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast, + .dmub_hw_control_lock_fast = dcn401_dmub_hw_control_lock_fast, .program_outstanding_updates = dcn401_program_outstanding_updates, .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, .detect_pipe_changes = dcn401_detect_pipe_changes, .enable_plane = dcn20_enable_plane, + .enable_plane_sequence = dcn401_enable_plane_sequence, .update_dchubp_dpp = dcn20_update_dchubp_dpp, + .update_dchubp_dpp_sequence = dcn401_update_dchubp_dpp_sequence, .post_unlock_reset_opp = dcn20_post_unlock_reset_opp, + .post_unlock_reset_opp_sequence = dcn401_post_unlock_reset_opp_sequence, .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn401_private_funcs = { .init_pipes = dcn10_init_pipes, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .plane_atomic_disconnect_sequence = dcn401_plane_atomic_disconnect_sequence, .update_mpcc = dcn20_update_mpcc, + .update_mpcc_sequence = dcn401_update_mpcc_sequence, .set_input_transfer_func = dcn32_set_input_transfer_func, .set_output_transfer_func = dcn401_set_output_transfer_func, .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, + .blank_pixel_data_sequence = dcn401_blank_pixel_data_sequence, .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap, .enable_stream_timing = dcn401_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .setup_vupdate_interrupt_sequence = dcn401_setup_vupdate_interrupt_sequence, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = dcn32_init_blank, .disable_vga = dcn20_disable_vga, .bios_golden_init = dcn10_bios_golden_init, .plane_atomic_disable = dcn20_plane_atomic_disable, .plane_atomic_power_down = dcn401_plane_atomic_power_down, + .plane_atomic_power_down_sequence = dcn401_plane_atomic_power_down_sequence, .enable_power_gating_plane = dcn32_enable_power_gating_plane, .hubp_pg_control = dcn32_hubp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .program_all_writeback_pipes_in_tree_sequence = dcn401_program_all_writeback_pipes_in_tree_sequence, .update_odm = dcn401_update_odm, + .update_odm_sequence = dcn401_update_odm_sequence, .dsc_pg_control = dcn32_dsc_pg_control, .dsc_pg_status = dcn32_dsc_pg_status, .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .set_hdr_multiplier_sequence = dcn401_set_hdr_multiplier_sequence, .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .verify_allow_pstate_change_high_sequence = dcn401_verify_allow_pstate_change_high_sequence, .wait_for_blank_complete = dcn20_wait_for_blank_complete, .dccg_init = dcn20_dccg_init, .set_mcm_luts = dcn401_set_mcm_luts, .program_mall_pipe_config = dcn32_program_mall_pipe_config, + .program_mall_pipe_config_sequence = dcn401_program_mall_pipe_config_sequence, .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = NULL, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, .reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe, .populate_mcm_luts = NULL, .perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock, + .program_pipe_sequence = dcn401_program_pipe_sequence, + 
.dc_ip_request_cntl = dcn401_dc_ip_request_cntl, }; void dcn401_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 1723bbcf2c46..8ed9eea40c56 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -31,6 +31,8 @@ #include "inc/hw/opp.h" #include "inc/hw/link_encoder.h" #include "inc/core_status.h" +#include "inc/hw/hw_shared.h" +#include "dsc/dsc.h" struct pipe_ctx; struct dc_state; @@ -48,6 +50,8 @@ struct dc_dmub_cmd; struct pg_block_update; struct drr_params; struct dc_underflow_debug_data; +struct dsc_optc_config; +struct vm_system_aperture_param; struct subvp_pipe_control_lock_fast_params { struct dc *dc; @@ -62,7 +66,7 @@ struct pipe_control_lock_params { }; struct set_flip_control_gsl_params { - struct pipe_ctx *pipe_ctx; + struct hubp *hubp; bool flip_immediate; }; @@ -148,12 +152,587 @@ struct wait_for_dcc_meta_propagation_params { const struct pipe_ctx *top_pipe_to_program; }; -struct fams2_global_control_lock_fast_params { +struct dmub_hw_control_lock_fast_params { struct dc *dc; bool is_required; bool lock; }; +struct program_surface_config_params { + struct hubp *hubp; + enum surface_pixel_format format; + struct dc_tiling_info *tiling_info; + struct plane_size plane_size; + enum dc_rotation_angle rotation; + struct dc_plane_dcc_param *dcc; + bool horizontal_mirror; + int compat_level; +}; + +struct program_mcache_id_and_split_coordinate { + struct hubp *hubp; + struct dml2_hubp_pipe_mcache_regs *mcache_regs; +}; + +struct program_cursor_update_now_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct hubp_wait_pipe_read_start_params { + struct hubp *hubp; +}; + +struct apply_update_flags_for_phantom_params { + struct pipe_ctx *pipe_ctx; +}; + +struct update_phantom_vp_position_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct set_odm_combine_params { + struct timing_generator *tg; + int opp_inst[MAX_PIPES]; + int opp_head_count; + int odm_slice_width; + int last_odm_slice_width; +}; + +struct set_odm_bypass_params { + struct timing_generator *tg; + const struct dc_crtc_timing *timing; +}; + +struct opp_pipe_clock_control_params { + struct output_pixel_processor *opp; + bool enable; +}; + +struct opp_program_left_edge_extra_pixel_params { + struct output_pixel_processor *opp; + enum dc_pixel_encoding pixel_encoding; + bool is_otg_master; +}; + +struct dccg_set_dto_dscclk_params { + struct dccg *dccg; + int inst; + int num_slices_h; +}; + +struct dsc_set_config_params { + struct display_stream_compressor *dsc; + struct dsc_config *dsc_cfg; + struct dsc_optc_config *dsc_optc_cfg; +}; + +struct dsc_enable_params { + struct display_stream_compressor *dsc; + int opp_inst; +}; + +struct tg_set_dsc_config_params { + struct timing_generator *tg; + struct dsc_optc_config *dsc_optc_cfg; + bool enable; +}; + +struct dsc_disconnect_params { + struct display_stream_compressor *dsc; +}; + +struct dsc_read_state_params { + struct display_stream_compressor *dsc; + struct dcn_dsc_state *dsc_state; +}; + +struct dsc_calculate_and_set_config_params { + struct pipe_ctx *pipe_ctx; + struct dsc_optc_config dsc_optc_cfg; + bool enable; + int opp_cnt; +}; + +struct dsc_enable_with_opp_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_tg_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct 
tg_program_global_sync_params { + struct timing_generator *tg; + int vready_offset; + unsigned int vstartup_lines; + unsigned int vupdate_offset_pixels; + unsigned int vupdate_vupdate_width_pixels; + unsigned int pstate_keepout_start_lines; +}; + +struct tg_wait_for_state_params { + struct timing_generator *tg; + enum crtc_state state; +}; + +struct tg_set_vtg_params_params { + struct timing_generator *tg; + struct dc_crtc_timing *timing; + bool program_fp2; +}; + +struct tg_set_gsl_params { + struct timing_generator *tg; + struct gsl_params gsl; +}; + +struct tg_set_gsl_source_select_params { + struct timing_generator *tg; + int group_idx; + uint32_t gsl_ready_signal; +}; + +struct setup_vupdate_interrupt_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct tg_setup_vertical_interrupt2_params { + struct timing_generator *tg; + int start_line; +}; + +struct dpp_set_hdr_multiplier_params { + struct dpp *dpp; + uint32_t hw_mult; +}; + +struct program_det_size_params { + struct hubbub *hubbub; + unsigned int hubp_inst; + unsigned int det_buffer_size_kb; +}; + +struct program_det_segments_params { + struct hubbub *hubbub; + unsigned int hubp_inst; + unsigned int det_size; +}; + +struct update_dchubp_dpp_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct opp_set_dyn_expansion_params { + struct output_pixel_processor *opp; + enum dc_color_space color_space; + enum dc_color_depth color_depth; + enum signal_type signal; +}; + +struct opp_program_fmt_params { + struct output_pixel_processor *opp; + struct bit_depth_reduction_params *fmt_bit_depth; + struct clamping_and_pixel_encoding_params *clamping; +}; + +struct opp_program_bit_depth_reduction_params { + struct output_pixel_processor *opp; + bool use_default_params; + struct pipe_ctx *pipe_ctx; +}; + +struct opp_set_disp_pattern_generator_params { + struct output_pixel_processor *opp; + enum controller_dp_test_pattern test_pattern; + enum controller_dp_color_space color_space; + enum dc_color_depth color_depth; + struct tg_color solid_color; + bool use_solid_color; + int width; + int height; + int offset; +}; + +struct set_abm_pipe_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_abm_level_params { + struct abm *abm; + unsigned int abm_level; +}; + +struct set_abm_immediate_disable_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_disp_pattern_generator_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + enum controller_dp_test_pattern test_pattern; + enum controller_dp_color_space color_space; + enum dc_color_depth color_depth; + const struct tg_color *solid_color; + int width; + int height; + int offset; +}; + +struct mpc_update_blending_params { + struct mpc *mpc; + struct mpcc_blnd_cfg blnd_cfg; + int mpcc_id; +}; + +struct mpc_assert_idle_mpcc_params { + struct mpc *mpc; + int mpcc_id; +}; + +struct mpc_insert_plane_params { + struct mpc *mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc_blnd_cfg blnd_cfg; + struct mpcc_sm_cfg *sm_cfg; + struct mpcc *insert_above_mpcc; + int dpp_id; + int mpcc_id; +}; + +struct mpc_remove_mpcc_params { + struct mpc *mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc *mpcc_to_remove; +}; + +struct opp_set_mpcc_disconnect_pending_params { + struct output_pixel_processor *opp; + int mpcc_inst; + bool pending; +}; + +struct dc_set_optimized_required_params { + struct dc *dc; + bool optimized_required; +}; + +struct hubp_disconnect_params { + struct hubp *hubp; +}; + +struct 
hubbub_force_pstate_change_control_params { + struct hubbub *hubbub; + bool enable; + bool wait; +}; + +struct tg_enable_crtc_params { + struct timing_generator *tg; +}; + +struct hubp_wait_flip_pending_params { + struct hubp *hubp; + unsigned int timeout_us; + unsigned int polling_interval_us; +}; + +struct tg_wait_double_buffer_pending_params { + struct timing_generator *tg; + unsigned int timeout_us; + unsigned int polling_interval_us; +}; + +struct update_force_pstate_params { + struct dc *dc; + struct dc_state *context; +}; + +struct hubbub_apply_dedcn21_147_wa_params { + struct hubbub *hubbub; +}; + +struct hubbub_allow_self_refresh_control_params { + struct hubbub *hubbub; + bool allow; + bool *disallow_self_refresh_applied; +}; + +struct tg_get_frame_count_params { + struct timing_generator *tg; + unsigned int *frame_count; +}; + +struct mpc_set_dwb_mux_params { + struct mpc *mpc; + int dwb_id; + int mpcc_id; +}; + +struct mpc_disable_dwb_mux_params { + struct mpc *mpc; + unsigned int dwb_id; +}; + +struct mcif_wb_config_buf_params { + struct mcif_wb *mcif_wb; + struct mcif_buf_params *mcif_buf_params; + unsigned int dest_height; +}; + +struct mcif_wb_config_arb_params { + struct mcif_wb *mcif_wb; + struct mcif_arb_params *mcif_arb_params; +}; + +struct mcif_wb_enable_params { + struct mcif_wb *mcif_wb; +}; + +struct mcif_wb_disable_params { + struct mcif_wb *mcif_wb; +}; + +struct dwbc_enable_params { + struct dwbc *dwb; + struct dc_dwb_params *dwb_params; +}; + +struct dwbc_disable_params { + struct dwbc *dwb; +}; + +struct dwbc_update_params { + struct dwbc *dwb; + struct dc_dwb_params *dwb_params; +}; + +struct hubp_update_mall_sel_params { + struct hubp *hubp; + uint32_t mall_sel; + bool cache_cursor; +}; + +struct hubp_prepare_subvp_buffering_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_set_blank_en_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_disable_control_params { + struct hubp *hubp; + bool disable; +}; + +struct hubbub_soft_reset_params { + struct hubbub *hubbub; + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset); + bool reset; +}; + +struct hubp_clk_cntl_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_init_params { + struct hubp *hubp; +}; + +struct hubp_set_vm_system_aperture_settings_params { + struct hubp *hubp; + //struct vm_system_aperture_param apt; + PHYSICAL_ADDRESS_LOC sys_default; + PHYSICAL_ADDRESS_LOC sys_low; + PHYSICAL_ADDRESS_LOC sys_high; +}; + +struct hubp_set_flip_int_params { + struct hubp *hubp; +}; + +struct dpp_dppclk_control_params { + struct dpp *dpp; + bool dppclk_div; + bool enable; +}; + +struct disable_phantom_crtc_params { + struct timing_generator *tg; +}; + +struct dpp_pg_control_params { + struct dce_hwseq *hws; + unsigned int dpp_inst; + bool power_on; +}; + +struct hubp_pg_control_params { + struct dce_hwseq *hws; + unsigned int hubp_inst; + bool power_on; +}; + +struct hubp_reset_params { + struct hubp *hubp; +}; + +struct dpp_reset_params { + struct dpp *dpp; +}; + +struct dpp_root_clock_control_params { + struct dce_hwseq *hws; + unsigned int dpp_inst; + bool clock_on; +}; + +struct dc_ip_request_cntl_params { + struct dc *dc; + bool enable; +}; + +struct dsc_pg_status_params { + struct dce_hwseq *hws; + int dsc_inst; + bool is_ungated; +}; + +struct dsc_wait_disconnect_pending_clear_params { + struct display_stream_compressor *dsc; + bool *is_ungated; +}; + +struct dsc_disable_params { + struct display_stream_compressor *dsc; + bool *is_ungated; +}; + +struct 
dccg_set_ref_dscclk_params { + struct dccg *dccg; + int dsc_inst; + bool *is_ungated; +}; + +struct dccg_update_dpp_dto_params { + struct dccg *dccg; + int dpp_inst; + int dppclk_khz; +}; + +struct hubp_vtg_sel_params { + struct hubp *hubp; + uint32_t otg_inst; +}; + +struct hubp_setup2_params { + struct hubp *hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs; + union dml2_global_sync_programming *global_sync; + struct dc_crtc_timing *timing; +}; + +struct hubp_setup_params { + struct hubp *hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs; + struct _vcs_dpi_display_rq_regs_st *rq_regs; + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest; +}; + +struct hubp_set_unbounded_requesting_params { + struct hubp *hubp; + bool unbounded_req; +}; + +struct hubp_setup_interdependent2_params { + struct hubp *hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs; +}; + +struct hubp_setup_interdependent_params { + struct hubp *hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs; +}; + +struct dpp_set_cursor_matrix_params { + struct dpp *dpp; + enum dc_color_space color_space; + struct dc_csc_transform *cursor_csc_color_matrix; +}; + +struct mpc_update_mpcc_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct dpp_set_scaler_params { + struct dpp *dpp; + const struct scaler_data *scl_data; +}; + +struct hubp_mem_program_viewport_params { + struct hubp *hubp; + const struct rect *viewport; + const struct rect *viewport_c; +}; + +struct hubp_program_mcache_id_and_split_coordinate_params { + struct hubp *hubp; + struct mcache_regs_struct *mcache_regs; +}; + +struct abort_cursor_offload_update_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_cursor_attribute_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_cursor_position_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_cursor_sdr_white_level_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct program_output_csc_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + enum dc_color_space colorspace; + uint16_t *matrix; + int opp_id; +}; + +struct hubp_set_blank_params { + struct hubp *hubp; + bool blank; +}; + +struct phantom_hubp_post_enable_params { + struct hubp *hubp; +}; + union block_sequence_params { struct update_plane_addr_params update_plane_addr_params; struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params; @@ -173,7 +752,108 @@ union block_sequence_params { struct set_ocsc_default_params set_ocsc_default_params; struct subvp_save_surf_addr subvp_save_surf_addr; struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params; - struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params; + struct dmub_hw_control_lock_fast_params dmub_hw_control_lock_fast_params; + struct program_surface_config_params program_surface_config_params; + struct program_mcache_id_and_split_coordinate program_mcache_id_and_split_coordinate; + struct program_cursor_update_now_params program_cursor_update_now_params; + struct hubp_wait_pipe_read_start_params hubp_wait_pipe_read_start_params; + struct apply_update_flags_for_phantom_params apply_update_flags_for_phantom_params; + struct update_phantom_vp_position_params update_phantom_vp_position_params; + struct set_odm_combine_params set_odm_combine_params; + struct set_odm_bypass_params set_odm_bypass_params; + struct 
opp_pipe_clock_control_params opp_pipe_clock_control_params; + struct opp_program_left_edge_extra_pixel_params opp_program_left_edge_extra_pixel_params; + struct dccg_set_dto_dscclk_params dccg_set_dto_dscclk_params; + struct dsc_set_config_params dsc_set_config_params; + struct dsc_enable_params dsc_enable_params; + struct tg_set_dsc_config_params tg_set_dsc_config_params; + struct dsc_disconnect_params dsc_disconnect_params; + struct dsc_read_state_params dsc_read_state_params; + struct dsc_calculate_and_set_config_params dsc_calculate_and_set_config_params; + struct dsc_enable_with_opp_params dsc_enable_with_opp_params; + struct program_tg_params program_tg_params; + struct tg_program_global_sync_params tg_program_global_sync_params; + struct tg_wait_for_state_params tg_wait_for_state_params; + struct tg_set_vtg_params_params tg_set_vtg_params_params; + struct tg_setup_vertical_interrupt2_params tg_setup_vertical_interrupt2_params; + struct dpp_set_hdr_multiplier_params dpp_set_hdr_multiplier_params; + struct tg_set_gsl_params tg_set_gsl_params; + struct tg_set_gsl_source_select_params tg_set_gsl_source_select_params; + struct setup_vupdate_interrupt_params setup_vupdate_interrupt_params; + struct program_det_size_params program_det_size_params; + struct program_det_segments_params program_det_segments_params; + struct update_dchubp_dpp_params update_dchubp_dpp_params; + struct opp_set_dyn_expansion_params opp_set_dyn_expansion_params; + struct opp_program_fmt_params opp_program_fmt_params; + struct opp_program_bit_depth_reduction_params opp_program_bit_depth_reduction_params; + struct opp_set_disp_pattern_generator_params opp_set_disp_pattern_generator_params; + struct set_abm_pipe_params set_abm_pipe_params; + struct set_abm_level_params set_abm_level_params; + struct set_abm_immediate_disable_params set_abm_immediate_disable_params; + struct set_disp_pattern_generator_params set_disp_pattern_generator_params; + struct mpc_remove_mpcc_params mpc_remove_mpcc_params; + struct opp_set_mpcc_disconnect_pending_params opp_set_mpcc_disconnect_pending_params; + struct dc_set_optimized_required_params dc_set_optimized_required_params; + struct hubp_disconnect_params hubp_disconnect_params; + struct hubbub_force_pstate_change_control_params hubbub_force_pstate_change_control_params; + struct tg_enable_crtc_params tg_enable_crtc_params; + struct hubp_wait_flip_pending_params hubp_wait_flip_pending_params; + struct tg_wait_double_buffer_pending_params tg_wait_double_buffer_pending_params; + struct update_force_pstate_params update_force_pstate_params; + struct hubbub_apply_dedcn21_147_wa_params hubbub_apply_dedcn21_147_wa_params; + struct hubbub_allow_self_refresh_control_params hubbub_allow_self_refresh_control_params; + struct tg_get_frame_count_params tg_get_frame_count_params; + struct mpc_set_dwb_mux_params mpc_set_dwb_mux_params; + struct mpc_disable_dwb_mux_params mpc_disable_dwb_mux_params; + struct mcif_wb_config_buf_params mcif_wb_config_buf_params; + struct mcif_wb_config_arb_params mcif_wb_config_arb_params; + struct mcif_wb_enable_params mcif_wb_enable_params; + struct mcif_wb_disable_params mcif_wb_disable_params; + struct dwbc_enable_params dwbc_enable_params; + struct dwbc_disable_params dwbc_disable_params; + struct dwbc_update_params dwbc_update_params; + struct hubp_update_mall_sel_params hubp_update_mall_sel_params; + struct hubp_prepare_subvp_buffering_params hubp_prepare_subvp_buffering_params; + struct hubp_set_blank_en_params hubp_set_blank_en_params; + struct 
hubp_disable_control_params hubp_disable_control_params; + struct hubbub_soft_reset_params hubbub_soft_reset_params; + struct hubp_clk_cntl_params hubp_clk_cntl_params; + struct hubp_init_params hubp_init_params; + struct hubp_set_vm_system_aperture_settings_params hubp_set_vm_system_aperture_settings_params; + struct hubp_set_flip_int_params hubp_set_flip_int_params; + struct dpp_dppclk_control_params dpp_dppclk_control_params; + struct disable_phantom_crtc_params disable_phantom_crtc_params; + struct dpp_pg_control_params dpp_pg_control_params; + struct hubp_pg_control_params hubp_pg_control_params; + struct hubp_reset_params hubp_reset_params; + struct dpp_reset_params dpp_reset_params; + struct dpp_root_clock_control_params dpp_root_clock_control_params; + struct dc_ip_request_cntl_params dc_ip_request_cntl_params; + struct dsc_pg_status_params dsc_pg_status_params; + struct dsc_wait_disconnect_pending_clear_params dsc_wait_disconnect_pending_clear_params; + struct dsc_disable_params dsc_disable_params; + struct dccg_set_ref_dscclk_params dccg_set_ref_dscclk_params; + struct dccg_update_dpp_dto_params dccg_update_dpp_dto_params; + struct hubp_vtg_sel_params hubp_vtg_sel_params; + struct hubp_setup2_params hubp_setup2_params; + struct hubp_setup_params hubp_setup_params; + struct hubp_set_unbounded_requesting_params hubp_set_unbounded_requesting_params; + struct hubp_setup_interdependent2_params hubp_setup_interdependent2_params; + struct hubp_setup_interdependent_params hubp_setup_interdependent_params; + struct dpp_set_cursor_matrix_params dpp_set_cursor_matrix_params; + struct mpc_update_mpcc_params mpc_update_mpcc_params; + struct mpc_update_blending_params mpc_update_blending_params; + struct mpc_assert_idle_mpcc_params mpc_assert_idle_mpcc_params; + struct mpc_insert_plane_params mpc_insert_plane_params; + struct dpp_set_scaler_params dpp_set_scaler_params; + struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params; + struct abort_cursor_offload_update_params abort_cursor_offload_update_params; + struct set_cursor_attribute_params set_cursor_attribute_params; + struct set_cursor_position_params set_cursor_position_params; + struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params; + struct program_output_csc_params program_output_csc_params; + struct hubp_set_blank_params hubp_set_blank_params; + struct phantom_hubp_post_enable_params phantom_hubp_post_enable_params; }; enum block_sequence_func { @@ -189,13 +869,111 @@ enum block_sequence_func { DPP_SETUP_DPP, DPP_PROGRAM_BIAS_AND_SCALE, DPP_SET_OUTPUT_TRANSFER_FUNC, + DPP_SET_HDR_MULTIPLIER, MPC_UPDATE_VISUAL_CONFIRM, MPC_POWER_ON_MPC_MEM_PWR, MPC_SET_OUTPUT_CSC, MPC_SET_OCSC_DEFAULT, DMUB_SUBVP_SAVE_SURF_ADDR, HUBP_WAIT_FOR_DCC_META_PROP, - DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST, + DMUB_HW_CONTROL_LOCK_FAST, + HUBP_PROGRAM_SURFACE_CONFIG, + HUBP_PROGRAM_MCACHE_ID, + PROGRAM_CURSOR_UPDATE_NOW, + HUBP_WAIT_PIPE_READ_START, + HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM, + HWS_UPDATE_PHANTOM_VP_POSITION, + OPTC_SET_ODM_COMBINE, + OPTC_SET_ODM_BYPASS, + OPP_PIPE_CLOCK_CONTROL, + OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL, + DCCG_SET_DTO_DSCCLK, + DSC_SET_CONFIG, + DSC_ENABLE, + TG_SET_DSC_CONFIG, + DSC_DISCONNECT, + DSC_READ_STATE, + DSC_CALCULATE_AND_SET_CONFIG, + DSC_ENABLE_WITH_OPP, + TG_PROGRAM_GLOBAL_SYNC, + TG_WAIT_FOR_STATE, + TG_SET_VTG_PARAMS, + TG_SETUP_VERTICAL_INTERRUPT2, + HUBP_PROGRAM_DET_SIZE, + HUBP_PROGRAM_DET_SEGMENTS, + OPP_SET_DYN_EXPANSION, + OPP_PROGRAM_FMT, + OPP_PROGRAM_BIT_DEPTH_REDUCTION, + 
OPP_SET_DISP_PATTERN_GENERATOR, + ABM_SET_PIPE, + ABM_SET_LEVEL, + ABM_SET_IMMEDIATE_DISABLE, + MPC_REMOVE_MPCC, + OPP_SET_MPCC_DISCONNECT_PENDING, + DC_SET_OPTIMIZED_REQUIRED, + HUBP_DISCONNECT, + HUBBUB_FORCE_PSTATE_CHANGE_CONTROL, + TG_ENABLE_CRTC, + TG_SET_GSL, + TG_SET_GSL_SOURCE_SELECT, + HUBP_WAIT_FLIP_PENDING, + TG_WAIT_DOUBLE_BUFFER_PENDING, + UPDATE_FORCE_PSTATE, + PROGRAM_MALL_PIPE_CONFIG, + HUBBUB_APPLY_DEDCN21_147_WA, + HUBBUB_ALLOW_SELF_REFRESH_CONTROL, + TG_GET_FRAME_COUNT, + MPC_SET_DWB_MUX, + MPC_DISABLE_DWB_MUX, + MCIF_WB_CONFIG_BUF, + MCIF_WB_CONFIG_ARB, + MCIF_WB_ENABLE, + MCIF_WB_DISABLE, + DWBC_ENABLE, + DWBC_DISABLE, + DWBC_UPDATE, + HUBP_UPDATE_MALL_SEL, + HUBP_PREPARE_SUBVP_BUFFERING, + HUBP_SET_BLANK_EN, + HUBP_DISABLE_CONTROL, + HUBBUB_SOFT_RESET, + HUBP_CLK_CNTL, + HUBP_INIT, + HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS, + HUBP_SET_FLIP_INT, + DPP_DPPCLK_CONTROL, + DISABLE_PHANTOM_CRTC, + DSC_PG_STATUS, + DSC_WAIT_DISCONNECT_PENDING_CLEAR, + DSC_DISABLE, + DCCG_SET_REF_DSCCLK, + DPP_PG_CONTROL, + HUBP_PG_CONTROL, + HUBP_RESET, + DPP_RESET, + DPP_ROOT_CLOCK_CONTROL, + DC_IP_REQUEST_CNTL, + DCCG_UPDATE_DPP_DTO, + HUBP_VTG_SEL, + HUBP_SETUP2, + HUBP_SETUP, + HUBP_SET_UNBOUNDED_REQUESTING, + HUBP_SETUP_INTERDEPENDENT2, + HUBP_SETUP_INTERDEPENDENT, + DPP_SET_CURSOR_MATRIX, + MPC_UPDATE_BLENDING, + MPC_ASSERT_IDLE_MPCC, + MPC_INSERT_PLANE, + DPP_SET_SCALER, + HUBP_MEM_PROGRAM_VIEWPORT, + ABORT_CURSOR_OFFLOAD_UPDATE, + SET_CURSOR_ATTRIBUTE, + SET_CURSOR_POSITION, + SET_CURSOR_SDR_WHITE_LEVEL, + PROGRAM_OUTPUT_CSC, + HUBP_SET_LEGACY_TILING_COMPAT_LEVEL, + HUBP_SET_BLANK, + PHANTOM_HUBP_POST_ENABLE, /* This must be the last value in this enum, add new ones above */ HWSS_BLOCK_SEQUENCE_FUNC_COUNT }; @@ -205,6 +983,11 @@ struct block_sequence { enum block_sequence_func func; }; +struct block_sequence_state { + struct block_sequence *steps; + unsigned int *num_steps; +}; + #define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES) struct hw_sequencer_funcs { @@ -222,6 +1005,8 @@ struct hw_sequencer_funcs { enum dc_status (*apply_ctx_to_hw)(struct dc *dc, struct dc_state *context); void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); + void (*disable_plane_sequence)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, @@ -239,6 +1024,10 @@ struct hw_sequencer_funcs { void (*wait_for_mpcc_disconnect)(struct dc *dc, struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx); + void (*wait_for_mpcc_disconnect_sequence)(struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*edp_backlight_control)( struct dc_link *link, bool enable); @@ -310,6 +1099,13 @@ struct hw_sequencer_funcs { void (*set_cursor_position)(struct pipe_ctx *pipe); void (*set_cursor_attribute)(struct pipe_ctx *pipe); void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); + void (*abort_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*begin_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*commit_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*update_cursor_offload_pipe)(struct dc *dc, const struct pipe_ctx *pipe); + void (*notify_cursor_offload_drr_update)(struct dc *dc, struct 
dc_state *context, + const struct dc_stream_state *stream); + void (*program_cursor_offload_now)(struct dc *dc, const struct pipe_ctx *pipe); /* Colour Related */ void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx); @@ -452,13 +1248,13 @@ struct hw_sequencer_funcs { const struct dc_state *new_ctx); void (*wait_for_dcc_meta_propagation)(const struct dc *dc, const struct pipe_ctx *top_pipe_to_program); - void (*fams2_global_control_lock)(struct dc *dc, + void (*dmub_hw_control_lock)(struct dc *dc, struct dc_state *context, bool lock); void (*fams2_update_config)(struct dc *dc, struct dc_state *context, bool enable); - void (*fams2_global_control_lock_fast)(union block_sequence_params *params); + void (*dmub_hw_control_lock_fast)(union block_sequence_params *params); void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); void (*program_outstanding_updates)(struct dc *dc, struct dc_state *context); @@ -471,11 +1267,23 @@ struct hw_sequencer_funcs { void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*enable_plane_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void (*update_dchubp_dpp)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*update_dchubp_dpp_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void (*post_unlock_reset_opp)(struct dc *dc, struct pipe_ctx *opp_head); + void (*post_unlock_reset_opp_sequence)( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state); void (*get_underflow_debug_data)(const struct dc *dc, struct timing_generator *tg, struct dc_underflow_debug_data *out_data); @@ -588,4 +1396,630 @@ void hwss_set_ocsc_default(union block_sequence_params *params); void hwss_subvp_save_surf_addr(union block_sequence_params *params); +void hwss_program_surface_config(union block_sequence_params *params); + +void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params); + +void hwss_set_odm_combine(union block_sequence_params *params); + +void hwss_set_odm_bypass(union block_sequence_params *params); + +void hwss_opp_pipe_clock_control(union block_sequence_params *params); + +void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params); + +void hwss_blank_pixel_data(union block_sequence_params *params); + +void hwss_dccg_set_dto_dscclk(union block_sequence_params *params); + +void hwss_dsc_set_config(union block_sequence_params *params); + +void hwss_dsc_enable(union block_sequence_params *params); + +void hwss_tg_set_dsc_config(union block_sequence_params *params); + +void hwss_dsc_disconnect(union block_sequence_params *params); + +void hwss_dsc_read_state(union block_sequence_params *params); + +void hwss_dsc_calculate_and_set_config(union block_sequence_params *params); + +void hwss_dsc_enable_with_opp(union block_sequence_params *params); + +void hwss_program_tg(union block_sequence_params *params); + +void hwss_tg_program_global_sync(union block_sequence_params *params); + +void hwss_tg_wait_for_state(union block_sequence_params *params); + +void hwss_tg_set_vtg_params(union block_sequence_params *params); + +void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params); + +void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params); + +void hwss_program_det_size(union block_sequence_params 
*params); + +void hwss_program_det_segments(union block_sequence_params *params); + +void hwss_opp_set_dyn_expansion(union block_sequence_params *params); + +void hwss_opp_program_fmt(union block_sequence_params *params); + +void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params); + +void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params); + +void hwss_set_abm_pipe(union block_sequence_params *params); + +void hwss_set_abm_level(union block_sequence_params *params); + +void hwss_set_abm_immediate_disable(union block_sequence_params *params); + +void hwss_mpc_remove_mpcc(union block_sequence_params *params); + +void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params); + +void hwss_dc_set_optimized_required(union block_sequence_params *params); + +void hwss_hubp_disconnect(union block_sequence_params *params); + +void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params); + +void hwss_tg_enable_crtc(union block_sequence_params *params); + +void hwss_tg_set_gsl(union block_sequence_params *params); + +void hwss_tg_set_gsl_source_select(union block_sequence_params *params); + +void hwss_hubp_wait_flip_pending(union block_sequence_params *params); + +void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params); + +void hwss_update_force_pstate(union block_sequence_params *params); + +void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params); + +void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params); + +void hwss_tg_get_frame_count(union block_sequence_params *params); + +void hwss_mpc_set_dwb_mux(union block_sequence_params *params); + +void hwss_mpc_disable_dwb_mux(union block_sequence_params *params); + +void hwss_mcif_wb_config_buf(union block_sequence_params *params); + +void hwss_mcif_wb_config_arb(union block_sequence_params *params); + +void hwss_mcif_wb_enable(union block_sequence_params *params); + +void hwss_mcif_wb_disable(union block_sequence_params *params); + +void hwss_dwbc_enable(union block_sequence_params *params); + +void hwss_dwbc_disable(union block_sequence_params *params); + +void hwss_dwbc_update(union block_sequence_params *params); + +void hwss_hubp_update_mall_sel(union block_sequence_params *params); + +void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params); + +void hwss_hubp_set_blank_en(union block_sequence_params *params); + +void hwss_hubp_disable_control(union block_sequence_params *params); + +void hwss_hubbub_soft_reset(union block_sequence_params *params); + +void hwss_hubp_clk_cntl(union block_sequence_params *params); + +void hwss_hubp_init(union block_sequence_params *params); + +void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params); + +void hwss_hubp_set_flip_int(union block_sequence_params *params); + +void hwss_dpp_dppclk_control(union block_sequence_params *params); + +void hwss_disable_phantom_crtc(union block_sequence_params *params); + +void hwss_dsc_pg_status(union block_sequence_params *params); + +void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params); + +void hwss_dsc_disable(union block_sequence_params *params); + +void hwss_dccg_set_ref_dscclk(union block_sequence_params *params); + +void hwss_dpp_pg_control(union block_sequence_params *params); + +void hwss_hubp_pg_control(union block_sequence_params *params); + +void hwss_hubp_reset(union block_sequence_params *params); + +void hwss_dpp_reset(union block_sequence_params 
*params); + +void hwss_dpp_root_clock_control(union block_sequence_params *params); + +void hwss_dc_ip_request_cntl(union block_sequence_params *params); + +void hwss_dccg_update_dpp_dto(union block_sequence_params *params); + +void hwss_hubp_vtg_sel(union block_sequence_params *params); + +void hwss_hubp_setup2(union block_sequence_params *params); + +void hwss_hubp_setup(union block_sequence_params *params); + +void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params); + +void hwss_hubp_setup_interdependent2(union block_sequence_params *params); + +void hwss_hubp_setup_interdependent(union block_sequence_params *params); + +void hwss_dpp_set_cursor_matrix(union block_sequence_params *params); + +void hwss_mpc_update_mpcc(union block_sequence_params *params); + +void hwss_mpc_update_blending(union block_sequence_params *params); + +void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params); + +void hwss_mpc_insert_plane(union block_sequence_params *params); + +void hwss_dpp_set_scaler(union block_sequence_params *params); + +void hwss_hubp_mem_program_viewport(union block_sequence_params *params); + +void hwss_abort_cursor_offload_update(union block_sequence_params *params); + +void hwss_set_cursor_attribute(union block_sequence_params *params); + +void hwss_set_cursor_position(union block_sequence_params *params); + +void hwss_set_cursor_sdr_white_level(union block_sequence_params *params); + +void hwss_program_output_csc(union block_sequence_params *params); + +void hwss_hubp_set_legacy_tiling_compat_level(union block_sequence_params *params); + +void hwss_hubp_set_blank(union block_sequence_params *params); + +void hwss_phantom_hubp_post_enable(union block_sequence_params *params); + +void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, bool lock); + +void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state, + struct hubp *hubp, bool flip_immediate); + +void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); + +void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state); + +void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream); + +void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, int mpcc_id); + +void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state, + struct mpc *mpc, int mpcc_id, bool power_on); + +void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state, + struct mpc *mpc, int opp_id, const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode); + +void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state, + struct mpc *mpc, int opp_id, enum dc_color_space colorspace, enum 
mpc_output_csc_mode ocsc_mode); + +void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state, + struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); + +void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state, + struct dc_dmub_srv *dc_dmub_srv, struct dc_plane_address *addr, uint8_t subvp_index); + +void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *top_pipe_to_program); + +void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state, + struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); + +void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state, + struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count, + int odm_slice_width, int last_odm_slice_width); + +void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state, + struct timing_generator *optc, struct dc_crtc_timing *timing); + +void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int vready_offset, + unsigned int vstartup_lines, + unsigned int vupdate_offset_pixels, + unsigned int vupdate_vupdate_width_pixels, + unsigned int pstate_keepout_start_lines); + +void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state, + struct timing_generator *tg, enum crtc_state state); + +void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state, + struct timing_generator *tg, struct dc_crtc_timing *dc_crtc_timing, bool program_fp2); + +void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state, + struct timing_generator *tg, int start_line); + +void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state, + struct dpp *dpp, uint32_t hw_mult); + +void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state, + struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_buffer_size_kb); + +void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state, + struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs); + +void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, bool enable, bool wait); + +void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state, + struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_size); + +void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, enum dc_color_space color_sp, + enum dc_color_depth color_dpth, enum signal_type signal); + +void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping); + +void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_abm_set_level(struct block_sequence_state *seq_state, + struct abm *abm, uint32_t abm_level); + +void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg); + +void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state, + struct hubp *hubp, unsigned int 
timeout_us, unsigned int polling_interval_us); + +void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state, + struct timing_generator *tg, unsigned int timeout_us, unsigned int polling_interval_us); + +void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, int inst, int num_slices_h); + +void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt); + +void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove); + +void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, int mpcc_inst, bool pending); + +void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc); + +void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state, + struct dc *dc, bool optimized_required); + +void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, + enum dc_color_depth color_depth, + struct tg_color solid_color, + bool use_solid_color, + int width, + int height, + int offset); + +void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool use_default_params, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state, + struct dc *dc, + bool enable); + +void hwss_add_dwbc_update(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params); + +void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_buf_params *mcif_buf_params, + unsigned int dest_height); + +void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_arb_params *mcif_arb_params); + +void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb); + +void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb); + +void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + int dwb_id, + int mpcc_id); + +void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + unsigned int dwb_id); + +void hwss_add_dwbc_enable(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params); + +void hwss_add_dwbc_disable(struct block_sequence_state *seq_state, + struct dwbc *dwb); + +void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct gsl_params gsl); + +void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int group_idx, + uint32_t gsl_ready_signal); + +void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t mall_sel, + bool 
cache_cursor); + +void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable); + +void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable); + +void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool disable); + +void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset), + bool reset); + +void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable); + +void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state, + struct dpp *dpp, + bool dppclk_div, + bool enable); + +void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg); + +void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + int dsc_inst, + bool is_ungated); + +void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated); + +void hwss_add_dsc_disable(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated); + +void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dsc_inst, + bool *is_ungated); + +void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on); + +void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); + +void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); + +void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool blank); + +void hwss_add_hubp_init(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_hubp_reset(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_dpp_reset(struct block_sequence_state *seq_state, + struct dpp *dpp); + +void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool enable); + +void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint64_t sys_default, + uint64_t sys_low, + uint64_t sys_high); + +void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dpp_inst, + int dppclk_khz); + +void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t otg_inst); + +void hwss_add_hubp_setup2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs, + union dml2_global_sync_programming *global_sync, + struct dc_crtc_timing *timing); + +void hwss_add_hubp_setup(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs, + struct _vcs_dpi_display_rq_regs_st *rq_regs, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); + +void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool 
unbounded_req); + +void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs); + +void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs); +void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state, + struct hubp *hubp, + enum surface_pixel_format format, + struct dc_tiling_info *tiling_info, + struct plane_size plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + int compat_level); + +void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state, + struct dpp *dpp, + enum dc_color_space color_space, + struct dc_csc_transform *cursor_csc_color_matrix); + +void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpcc_blnd_cfg blnd_cfg, + int mpcc_id); + +void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int mpcc_id); + +void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpc_tree *mpc_tree_params, + struct mpcc_blnd_cfg blnd_cfg, + struct mpcc_sm_cfg *sm_cfg, + struct mpcc *insert_above_mpcc, + int dpp_id, + int mpcc_id); + +void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state, + struct dpp *dpp, + const struct scaler_data *scl_data); + +void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state, + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c); + +void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_set_cursor_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_program_output_csc(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id); + +void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_update_force_pstate(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context); + +void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state, + struct hubbub *hubbub); + +void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool allow, + bool *disallow_self_refresh_applied); + +void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int *frame_count); + +void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct dsc_optc_config *dsc_optc_cfg, + bool enable); + +void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, + bool is_otg_master); + #endif /* __DC_HW_SEQUENCER_H__ */ diff 
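The hw_sequencer.h hunk above is largely mechanical: for each hardware step it adds a parameter struct, a member in union block_sequence_params, an enum block_sequence_func entry, a hwss_* executor, and a hwss_add_* builder that appends the step through the new struct block_sequence_state wrapper. A minimal sketch of how a caller might record and then replay a few steps follows; the dc->block_sequence scratch array and the hwss_execute_sequence() entry point are assumptions based on the existing fast-update path, not declarations from this hunk.

static void example_record_and_replay(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	/* Scratch array and step counter assumed to live in struct dc. */
	struct block_sequence_state seq_state = {
		.steps = dc->block_sequence,
		.num_steps = &dc->block_sequence_steps,
	};

	*seq_state.num_steps = 0;

	/* Each hwss_add_* helper fills one params union member and bumps *num_steps. */
	hwss_add_hubp_set_blank_en(&seq_state, pipe_ctx->plane_res.hubp, true);
	hwss_add_hubp_update_plane_addr(&seq_state, dc, pipe_ctx);
	hwss_add_hubp_set_blank_en(&seq_state, pipe_ctx->plane_res.hubp, false);

	/* Replay the recorded steps in order. */
	hwss_execute_sequence(dc, dc->block_sequence, *seq_state.num_steps);
}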
--git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 1e2d247fbbac..406db231bc72 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -27,6 +27,7 @@ #define __DC_HW_SEQUENCER_PRIVATE_H__ #include "dc_types.h" +#include "hw_sequencer.h" enum pipe_gating_control { PIPE_GATING_CONTROL_DISABLE = 0, @@ -80,7 +81,13 @@ struct hwseq_private_funcs { void (*plane_atomic_disconnect)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); + void (*plane_atomic_disconnect_sequence)(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*update_mpcc_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); bool (*set_input_transfer_func)(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); @@ -97,6 +104,10 @@ struct hwseq_private_funcs { void (*blank_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); + void (*blank_pixel_data_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state); enum dc_status (*enable_stream_timing)( struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -105,6 +116,8 @@ struct hwseq_private_funcs { bool enable); void (*setup_vupdate_interrupt)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*setup_vupdate_interrupt_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); void (*init_blank)(struct dc *dc, struct timing_generator *tg); void (*disable_vga)(struct dce_hwseq *hws); @@ -112,6 +125,10 @@ struct hwseq_private_funcs { void (*plane_atomic_power_down)(struct dc *dc, struct dpp *dpp, struct hubp *hubp); + void (*plane_atomic_power_down_sequence)(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state *seq_state); void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx); void (*enable_power_gating_plane)(struct dce_hwseq *hws, bool enable); @@ -140,15 +157,31 @@ struct hwseq_private_funcs { unsigned int dsc_inst); void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); + void (*update_odm_sequence)(struct dc *dc, struct dc_state *context, + struct pipe_ctx *pipe_ctx, struct block_sequence_state *seq_state); void (*program_all_writeback_pipes_in_tree)(struct dc *dc, const struct dc_stream_state *stream, struct dc_state *context); + void (*program_all_writeback_pipes_in_tree_sequence)( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state); bool (*s0i3_golden_init_wa)(struct dc *dc); void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx); + void (*set_hdr_multiplier_sequence)(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*verify_allow_pstate_change_high)(struct dc *dc); + void (*verify_allow_pstate_change_high_sequence)(struct dc *dc, + struct block_sequence_state *seq_state); void (*program_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*program_pipe_sequence)( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); bool (*wait_for_blank_complete)(struct 
output_pixel_processor *opp); void (*dccg_init)(struct dce_hwseq *hws); bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx, @@ -163,6 +196,8 @@ struct hwseq_private_funcs { void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context); + void (*program_mall_pipe_config_sequence)(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state); void (*update_force_pstate)(struct dc *dc, struct dc_state *context); void (*update_mall_sel)(struct dc *dc, struct dc_state *context); unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx, @@ -186,6 +221,7 @@ struct hwseq_private_funcs { void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx); void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only); void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*dc_ip_request_cntl)(struct dc *dc, bool enable); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d11893f8c916..5ed2cd344804 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -58,8 +58,8 @@ #include "transform.h" #include "dpp.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" -#include "dml2/dml21/inc/dml_top_types.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_types.h" struct resource_pool; struct dc_state; @@ -274,7 +274,7 @@ struct resource_pool { /* An array for accessing the link encoder objects that have been created. * Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA */ - struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS]; + struct link_encoder *link_encoders[MAX_LINK_ENCODERS]; /* Number of DIG link encoder objects created - i.e. number of valid * entries in link_encoders array. 
*/ @@ -514,7 +514,7 @@ struct pipe_ctx { struct link_enc_cfg_context { enum link_enc_cfg_mode mode; struct link_enc_assignment link_enc_assignments[MAX_PIPES]; - enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS]; + enum engine_id link_enc_avail[MAX_LINK_ENCODERS]; struct link_enc_assignment transient_assignments[MAX_PIPES]; }; @@ -526,8 +526,8 @@ struct resource_context { uint8_t dp_clock_source_ref_count; bool is_dsc_acquired[MAX_PIPES]; struct link_enc_cfg_context link_enc_cfg_ctx; - unsigned int dio_link_enc_to_link_idx[MAX_DIG_LINK_ENCODERS]; - int dio_link_enc_ref_cnts[MAX_DIG_LINK_ENCODERS]; + unsigned int dio_link_enc_to_link_idx[MAX_LINK_ENCODERS]; + int dio_link_enc_ref_cnts[MAX_LINK_ENCODERS]; bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h index 45645f9fd86c..7ce2f417f86a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h @@ -57,9 +57,9 @@ struct cursor_attribute_cache_hubp { } size; union reg_cursor_settings_cfg { struct { - uint32_t dst_y_offset: 8; - uint32_t chunk_hdl_adjust: 2; - uint32_t reserved: 22; + uint32_t dst_y_offset: 8; + uint32_t chunk_hdl_adjust: 2; + uint32_t reserved: 22; } bits; uint32_t raw; } settings; @@ -83,12 +83,34 @@ union reg_cur0_control_cfg { } bits; uint32_t raw; }; + struct cursor_position_cache_dpp { union reg_cur0_control_cfg cur0_ctl; }; struct cursor_attribute_cache_dpp { union reg_cur0_control_cfg cur0_ctl; + union reg_cur0_fp_scale_bias { + struct { + uint32_t fp_bias: 16; + uint32_t fp_scale: 16; + } bits; + uint32_t raw; + } fp_scale_bias; + union reg_cur0_fp_scale_bias_g_y { + struct { + uint32_t fp_bias_g_y: 16; + uint32_t fp_scale_g_y: 16; + } bits; + uint32_t raw; + } fp_scale_bias_g_y; + union reg_cur0_fp_scale_bias_rb_crcb { + struct { + uint32_t fp_bias_rb_crcb: 16; + uint32_t fp_scale_rb_crcb: 16; + } bits; + uint32_t raw; + } fp_scale_bias_rb_crcb; }; struct cursor_attributes_cfg { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 61c4d2a7db1c..500a601e99b5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -71,6 +71,125 @@ enum pixel_rate_div { PIXEL_RATE_DIV_NA = 0xF }; +struct dcn_dccg_reg_state { + uint32_t dc_mem_global_pwr_req_cntl; + uint32_t dccg_audio_dtbclk_dto_modulo; + uint32_t dccg_audio_dtbclk_dto_phase; + uint32_t dccg_audio_dto_source; + uint32_t dccg_audio_dto0_module; + uint32_t dccg_audio_dto0_phase; + uint32_t dccg_audio_dto1_module; + uint32_t dccg_audio_dto1_phase; + uint32_t dccg_cac_status; + uint32_t dccg_cac_status2; + uint32_t dccg_disp_cntl_reg; + uint32_t dccg_ds_cntl; + uint32_t dccg_ds_dto_incr; + uint32_t dccg_ds_dto_modulo; + uint32_t dccg_ds_hw_cal_interval; + uint32_t dccg_gate_disable_cntl; + uint32_t dccg_gate_disable_cntl2; + uint32_t dccg_gate_disable_cntl3; + uint32_t dccg_gate_disable_cntl4; + uint32_t dccg_gate_disable_cntl5; + uint32_t dccg_gate_disable_cntl6; + uint32_t dccg_global_fgcg_rep_cntl; + uint32_t dccg_gtc_cntl; + uint32_t dccg_gtc_current; + uint32_t dccg_gtc_dto_incr; + uint32_t dccg_gtc_dto_modulo; + uint32_t dccg_perfmon_cntl; + uint32_t dccg_perfmon_cntl2; + uint32_t dccg_soft_reset; + uint32_t 
dccg_test_clk_sel; + uint32_t dccg_vsync_cnt_ctrl; + uint32_t dccg_vsync_cnt_int_ctrl; + uint32_t dccg_vsync_otg0_latch_value; + uint32_t dccg_vsync_otg1_latch_value; + uint32_t dccg_vsync_otg2_latch_value; + uint32_t dccg_vsync_otg3_latch_value; + uint32_t dccg_vsync_otg4_latch_value; + uint32_t dccg_vsync_otg5_latch_value; + uint32_t dispclk_cgtt_blk_ctrl_reg; + uint32_t dispclk_freq_change_cntl; + uint32_t dp_dto_dbuf_en; + uint32_t dp_dto0_modulo; + uint32_t dp_dto0_phase; + uint32_t dp_dto1_modulo; + uint32_t dp_dto1_phase; + uint32_t dp_dto2_modulo; + uint32_t dp_dto2_phase; + uint32_t dp_dto3_modulo; + uint32_t dp_dto3_phase; + uint32_t dpiaclk_540m_dto_modulo; + uint32_t dpiaclk_540m_dto_phase; + uint32_t dpiaclk_810m_dto_modulo; + uint32_t dpiaclk_810m_dto_phase; + uint32_t dpiaclk_dto_cntl; + uint32_t dpiasymclk_cntl; + uint32_t dppclk_cgtt_blk_ctrl_reg; + uint32_t dppclk_ctrl; + uint32_t dppclk_dto_ctrl; + uint32_t dppclk0_dto_param; + uint32_t dppclk1_dto_param; + uint32_t dppclk2_dto_param; + uint32_t dppclk3_dto_param; + uint32_t dprefclk_cgtt_blk_ctrl_reg; + uint32_t dprefclk_cntl; + uint32_t dpstreamclk_cntl; + uint32_t dscclk_dto_ctrl; + uint32_t dscclk0_dto_param; + uint32_t dscclk1_dto_param; + uint32_t dscclk2_dto_param; + uint32_t dscclk3_dto_param; + uint32_t dtbclk_dto_dbuf_en; + uint32_t dtbclk_dto0_modulo; + uint32_t dtbclk_dto0_phase; + uint32_t dtbclk_dto1_modulo; + uint32_t dtbclk_dto1_phase; + uint32_t dtbclk_dto2_modulo; + uint32_t dtbclk_dto2_phase; + uint32_t dtbclk_dto3_modulo; + uint32_t dtbclk_dto3_phase; + uint32_t dtbclk_p_cntl; + uint32_t force_symclk_disable; + uint32_t hdmicharclk0_clock_cntl; + uint32_t hdmistreamclk_cntl; + uint32_t hdmistreamclk0_dto_param; + uint32_t microsecond_time_base_div; + uint32_t millisecond_time_base_div; + uint32_t otg_pixel_rate_div; + uint32_t otg0_phypll_pixel_rate_cntl; + uint32_t otg0_pixel_rate_cntl; + uint32_t otg1_phypll_pixel_rate_cntl; + uint32_t otg1_pixel_rate_cntl; + uint32_t otg2_phypll_pixel_rate_cntl; + uint32_t otg2_pixel_rate_cntl; + uint32_t otg3_phypll_pixel_rate_cntl; + uint32_t otg3_pixel_rate_cntl; + uint32_t phyasymclk_clock_cntl; + uint32_t phybsymclk_clock_cntl; + uint32_t phycsymclk_clock_cntl; + uint32_t phydsymclk_clock_cntl; + uint32_t phyesymclk_clock_cntl; + uint32_t phyplla_pixclk_resync_cntl; + uint32_t phypllb_pixclk_resync_cntl; + uint32_t phypllc_pixclk_resync_cntl; + uint32_t phyplld_pixclk_resync_cntl; + uint32_t phyplle_pixclk_resync_cntl; + uint32_t refclk_cgtt_blk_ctrl_reg; + uint32_t socclk_cgtt_blk_ctrl_reg; + uint32_t symclk_cgtt_blk_ctrl_reg; + uint32_t symclk_psp_cntl; + uint32_t symclk32_le_cntl; + uint32_t symclk32_se_cntl; + uint32_t symclka_clock_enable; + uint32_t symclkb_clock_enable; + uint32_t symclkc_clock_enable; + uint32_t symclkd_clock_enable; + uint32_t symclke_clock_enable; +}; + struct dccg { struct dc_context *ctx; const struct dccg_funcs *funcs; @@ -81,7 +200,6 @@ struct dccg { //int audio_dtbclk_khz;/* TODO needs to be removed */ //int ref_dtbclk_khz;/* TODO needs to be removed */ }; - struct dtbclk_dto_params { const struct dc_crtc_timing *timing; int otg_inst; @@ -214,6 +332,7 @@ struct dccg_funcs { void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h); void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); + void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state); }; #endif 
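The new struct dcn_dccg_reg_state and the optional dccg_read_reg_state hook follow the same pattern as the hubbub/dpp/hubp/mpc/opp *_read_reg_state additions elsewhere in this series: snapshot raw register values for underflow triage rather than decoded state. A hedged sketch of a consumer is below; the dump helper name, the res_pool->dccg lookup and the DC_LOG_DEBUG formatting are illustrative assumptions, not code from this patch.

static void example_dump_dccg_reg_state(struct dc *dc)
{
	struct dccg *dccg = dc->res_pool->dccg;
	struct dcn_dccg_reg_state reg_state = {0};

	/* The hook is optional; older DCN generations may not implement it. */
	if (!dccg || !dccg->funcs->dccg_read_reg_state)
		return;

	dccg->funcs->dccg_read_reg_state(dccg, &reg_state);

	DC_LOG_DEBUG("DCCG: otg_pixel_rate_div=0x%08x dppclk_dto_ctrl=0x%08x dccg_soft_reset=0x%08x",
		     reg_state.otg_pixel_rate_div,
		     reg_state.dppclk_dto_ctrl,
		     reg_state.dccg_soft_reset);
}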
//__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 843a18287c83..1ddfa30411c8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -137,6 +137,14 @@ struct dcn_hubbub_state { uint32_t dram_state_cntl; }; +struct dcn_hubbub_reg_state { + uint32_t det0_ctrl; + uint32_t det1_ctrl; + uint32_t det2_ctrl; + uint32_t det3_ctrl; + uint32_t compbuf_ctrl; +}; + struct hubbub_system_latencies { uint32_t max_latency_ns; uint32_t avg_latency_ns; @@ -216,6 +224,8 @@ struct hubbub_funcs { void (*init_watermarks)(struct hubbub *hubbub); + void (*hubbub_read_reg_state)(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state); + /** * @program_det_size: * @@ -242,17 +252,39 @@ struct hubbub_funcs { void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst); bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower); - void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes); - uint32_t (*compbuf_config_error)(struct hubbub *hubbub); - struct hubbub_perfmon_funcs{ - void (*start_system_latency_measurement)(struct hubbub *hubbub); - void (*get_system_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, struct hubbub_system_latencies *latencies); - void (*start_in_order_bandwidth_measurement)(struct hubbub *hubbub); - void (*get_in_order_bandwidth_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *bandwidth_mbps); - void (*start_urgent_ramp_latency_measurement)(struct hubbub *hubbub, const struct hubbub_urgent_latency_params *params); - void (*get_urgent_ramp_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *latency_ns); + void (*dchvm_init)(struct hubbub *hubbub); + + struct hubbub_perfmon_funcs { void (*reset)(struct hubbub *hubbub); + void (*start_measuring_max_memory_latency_ns)( + struct hubbub *hubbub); + uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *sample_count); + void (*start_measuring_average_memory_latency_ns)( + struct hubbub *hubbub); + uint32_t (*get_average_memory_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *sample_count); + void (*start_measuring_urgent_ramp_latency_ns)( + struct hubbub *hubbub, + const struct hubbub_urgent_latency_params *params); + uint32_t (*get_urgent_ramp_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz); + void (*start_measuring_unbounded_bandwidth_mbps)( + struct hubbub *hubbub); + uint32_t (*get_unbounded_bandwidth_mbps)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *duration_ns); + void (*start_measuring_average_bandwidth_mbps)( + struct hubbub *hubbub); + uint32_t (*get_average_bandwidth_mbps)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t min_duration_ns, + uint32_t *duration_ns); } perfmon; + + struct hubbub_qos_funcs { + void (*force_display_nominal_profile)(struct hubbub *hubbub); + void (*force_display_urgent_profile)(struct hubbub *hubbub); + void (*reset_display_qos_profile)(struct hubbub *hubbub); + } qos; }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 1b7c085dc2cc..d88b57d4f512 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ 
-65,7 +65,6 @@ union defer_reg_writes { } bits; uint32_t raw; }; - struct dpp { const struct dpp_funcs *funcs; struct dc_context *ctx; @@ -84,6 +83,7 @@ struct dpp { struct pwl_params shaper_params; bool cm_bypass_mode; + bool cursor_offload; struct cursor_position_cache_dpp pos; struct cursor_attribute_cache_dpp att; @@ -202,6 +202,19 @@ struct dcn_dpp_state { uint32_t gamcor_mode; }; +struct dcn_dpp_reg_state { + uint32_t recout_start; + uint32_t recout_size; + uint32_t scl_horz_filter_scale_ratio; + uint32_t scl_vert_filter_scale_ratio; + uint32_t scl_mode; + uint32_t cm_control; + uint32_t dpp_control; + uint32_t dscl_control; + uint32_t obuf_control; + uint32_t mpc_size; +}; + struct CM_bias_params { uint32_t cm_bias_cr_r; uint32_t cm_bias_y_g; @@ -225,6 +238,8 @@ struct dpp_funcs { void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s); + void (*dpp_read_reg_state)(struct dpp *dpp, struct dcn_dpp_reg_state *dpp_reg_state); + void (*dpp_reset)(struct dpp *dpp); void (*dpp_set_scaler)(struct dpp *dpp, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 2b874d2cc61c..a79019365af8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -41,8 +41,8 @@ #include "mem_input.h" #include "cursor_reg_cache.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" -#include "dml2/dml21/inc/dml_top_types.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_types.h" #define OPP_ID_INVALID 0xf #define MAX_TTU 0xffffff @@ -126,11 +126,13 @@ struct hubp { int mpcc_id; struct dc_cursor_attributes curs_attr; struct dc_cursor_position curs_pos; + bool cursor_offload; bool power_gated; struct cursor_position_cache_hubp pos; struct cursor_attribute_cache_hubp att; struct cursor_rect cur_rect; + bool use_mall_for_cursor; }; struct surface_flip_registers { @@ -236,6 +238,7 @@ struct hubp_funcs { void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); void (*hubp_read_state)(struct hubp *hubp); + void (*hubp_read_reg_state)(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state); void (*hubp_clear_underflow)(struct hubp *hubp); void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 62a39204fe0b..a61d12ec61bc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -51,11 +51,60 @@ #define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS) +/** + * define MAX_DIG_LINK_ENCODERS - maximum number of digital encoders + * + * Digital encoders are ENGINE_ID_DIGA...G; there are at most 7, + * although not every GPU may have that many. + */ #define MAX_DIG_LINK_ENCODERS 7 + +/** + * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders + * + * Analog encoders are ENGINE_ID_DACA/B; there are at most 2, + * although not every GPU may have that many. Modern GPUs typically + * don't have analog encoders. + */ +#define MAX_DAC_LINK_ENCODERS 2 + +/** + * define MAX_LINK_ENCODERS - maximum number of link encoders in total + * + * This includes both analog and digital encoders.
+ */ +#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS) + #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 #define MAX_HPO_DP2_LINK_ENCODERS 4 +/* Pipe topology snapshot structures */ +#define MAX_TOPOLOGY_SNAPSHOTS 4 + +struct pipe_topology_line { + bool is_phantom_pipe; + int plane_idx; + int slice_idx; + int stream_idx; + int dpp_inst; + int opp_inst; + int tg_inst; +}; + +struct pipe_topology_snapshot { + struct pipe_topology_line pipe_log_lines[MAX_PIPES]; + int line_count; + uint64_t timestamp_us; + int stream_count; + int phantom_stream_count; +}; + +struct pipe_topology_history { + struct pipe_topology_snapshot snapshots[MAX_TOPOLOGY_SNAPSHOTS]; + int current_snapshot_index; +}; + struct gamma_curve { uint32_t offset; uint32_t segments_num; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 08c16ba52a51..df512920a9fa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -47,6 +47,7 @@ struct encoder_init_data { enum hpd_source_id hpd_source; /* TODO: in DAL2, here was pointer to EventManagerInterface */ struct graphics_object_id encoder; + enum engine_id analog_engine; struct dc_context *ctx; enum transmitter transmitter; }; @@ -83,6 +84,7 @@ struct link_encoder { struct graphics_object_id connector; uint32_t output_signals; enum engine_id preferred_engine; + enum engine_id analog_engine; struct encoder_feature_support features; enum transmitter transmitter; enum hpd_source_id hpd_source; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 42fbc70f7056..d468bc85566a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -29,7 +29,7 @@ #include "include/grph_object_id.h" #include "dml/display_mode_structs.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" struct dchub_init_data; struct cstate_pstate_watermarks_st { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 22960ee03dee..a8d1abe20f62 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -350,6 +350,15 @@ struct mpcc_state { struct mpc_rmcm_regs rmcm_regs; }; +struct dcn_mpc_reg_state { + uint32_t mpcc_bot_sel; + uint32_t mpcc_control; + uint32_t mpcc_status; + uint32_t mpcc_top_sel; + uint32_t mpcc_opp_id; + uint32_t mpcc_ogam_control; +}; + /** * struct mpc_funcs - funcs */ @@ -373,6 +382,24 @@ struct mpc_funcs { struct mpc *mpc, int mpcc_inst, struct mpcc_state *s); + /** + * @mpc_read_reg_state: + * + * Read MPC register state for debugging underflow purposes. 
+ * + * Parameters: + * + * - [in] mpc - MPC context + * - [out] reg_state - MPC register state structure + * + * Return: + * + * void + */ + void (*mpc_read_reg_state)( + struct mpc *mpc, + int mpcc_inst, + struct dcn_mpc_reg_state *mpc_reg_state); /** * @insert_plane: diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 747679cb4944..e1428a83ecbc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -297,6 +297,16 @@ struct oppbuf_params { uint32_t num_segment_padded_pixels; }; +struct dcn_opp_reg_state { + uint32_t dpg_control; + uint32_t fmt_control; + uint32_t oppbuf_control; + uint32_t opp_pipe_control; + uint32_t opp_pipe_crc_control; + uint32_t opp_abm_control; + uint32_t dscrm_dsc_forward_config; +}; + struct opp_funcs { @@ -368,6 +378,9 @@ struct opp_funcs { struct output_pixel_processor *opp, enum dc_pixel_encoding pixel_encoding, bool is_primary); + + void (*opp_read_reg_state)( + struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index f2de2cf23859..da7bf59c4b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -175,6 +175,135 @@ struct dcn_otg_state { uint32_t otg_double_buffer_control; }; +struct dcn_optc_reg_state { + uint32_t optc_bytes_per_pixel; + uint32_t optc_data_format_control; + uint32_t optc_data_source_select; + uint32_t optc_input_clock_control; + uint32_t optc_input_global_control; + uint32_t optc_input_spare_register; + uint32_t optc_memory_config; + uint32_t optc_rsmu_underflow; + uint32_t optc_underflow_threshold; + uint32_t optc_width_control; + + uint32_t otg_3d_structure_control; + uint32_t otg_clock_control; + uint32_t otg_control; + uint32_t otg_count_control; + uint32_t otg_count_reset; + uint32_t otg_crc_cntl; + uint32_t otg_crc_sig_blue_control_mask; + uint32_t otg_crc_sig_red_green_mask; + uint32_t otg_crc0_data_b; + uint32_t otg_crc0_data_rg; + uint32_t otg_crc0_windowa_x_control; + uint32_t otg_crc0_windowa_x_control_readback; + uint32_t otg_crc0_windowa_y_control; + uint32_t otg_crc0_windowa_y_control_readback; + uint32_t otg_crc0_windowb_x_control; + uint32_t otg_crc0_windowb_x_control_readback; + uint32_t otg_crc0_windowb_y_control; + uint32_t otg_crc0_windowb_y_control_readback; + uint32_t otg_crc1_data_b; + uint32_t otg_crc1_data_rg; + uint32_t otg_crc1_windowa_x_control; + uint32_t otg_crc1_windowa_x_control_readback; + uint32_t otg_crc1_windowa_y_control; + uint32_t otg_crc1_windowa_y_control_readback; + uint32_t otg_crc1_windowb_x_control; + uint32_t otg_crc1_windowb_x_control_readback; + uint32_t otg_crc1_windowb_y_control; + uint32_t otg_crc1_windowb_y_control_readback; + uint32_t otg_crc2_data_b; + uint32_t otg_crc2_data_rg; + uint32_t otg_crc3_data_b; + uint32_t otg_crc3_data_rg; + uint32_t otg_dlpc_control; + uint32_t otg_double_buffer_control; + uint32_t otg_drr_control2; + uint32_t otg_drr_control; + uint32_t otg_drr_timing_int_status; + uint32_t otg_drr_trigger_window; + uint32_t otg_drr_v_total_change; + uint32_t otg_drr_v_total_reach_range; + uint32_t otg_dsc_start_position; + uint32_t otg_force_count_now_cntl; + uint32_t otg_global_control0; + uint32_t otg_global_control1; + uint32_t otg_global_control2; + uint32_t otg_global_control3; + uint32_t otg_global_control4; + 
uint32_t otg_global_sync_status; + uint32_t otg_gsl_control; + uint32_t otg_gsl_vsync_gap; + uint32_t otg_gsl_window_x; + uint32_t otg_gsl_window_y; + uint32_t otg_h_blank_start_end; + uint32_t otg_h_sync_a; + uint32_t otg_h_sync_a_cntl; + uint32_t otg_h_timing_cntl; + uint32_t otg_h_total; + uint32_t otg_interlace_control; + uint32_t otg_interlace_status; + uint32_t otg_interrupt_control; + uint32_t otg_long_vblank_status; + uint32_t otg_m_const_dto0; + uint32_t otg_m_const_dto1; + uint32_t otg_manual_force_vsync_next_line; + uint32_t otg_master_en; + uint32_t otg_master_update_lock; + uint32_t otg_master_update_mode; + uint32_t otg_nom_vert_position; + uint32_t otg_pipe_update_status; + uint32_t otg_pixel_data_readback0; + uint32_t otg_pixel_data_readback1; + uint32_t otg_request_control; + uint32_t otg_snapshot_control; + uint32_t otg_snapshot_frame; + uint32_t otg_snapshot_position; + uint32_t otg_snapshot_status; + uint32_t otg_spare_register; + uint32_t otg_static_screen_control; + uint32_t otg_status; + uint32_t otg_status_frame_count; + uint32_t otg_status_hv_count; + uint32_t otg_status_position; + uint32_t otg_status_vf_count; + uint32_t otg_stereo_control; + uint32_t otg_stereo_force_next_eye; + uint32_t otg_stereo_status; + uint32_t otg_trig_manual_control; + uint32_t otg_triga_cntl; + uint32_t otg_triga_manual_trig; + uint32_t otg_trigb_cntl; + uint32_t otg_trigb_manual_trig; + uint32_t otg_update_lock; + uint32_t otg_v_blank_start_end; + uint32_t otg_v_count_stop_control; + uint32_t otg_v_count_stop_control2; + uint32_t otg_v_sync_a; + uint32_t otg_v_sync_a_cntl; + uint32_t otg_v_total; + uint32_t otg_v_total_control; + uint32_t otg_v_total_int_status; + uint32_t otg_v_total_max; + uint32_t otg_v_total_mid; + uint32_t otg_v_total_min; + uint32_t otg_vert_sync_control; + uint32_t otg_vertical_interrupt0_control; + uint32_t otg_vertical_interrupt0_position; + uint32_t otg_vertical_interrupt1_control; + uint32_t otg_vertical_interrupt1_position; + uint32_t otg_vertical_interrupt2_control; + uint32_t otg_vertical_interrupt2_position; + uint32_t otg_vready_param; + uint32_t otg_vstartup_param; + uint32_t otg_vsync_nom_int_status; + uint32_t otg_vupdate_keepout; + uint32_t otg_vupdate_param; +}; + /** * struct timing_generator - Entry point to Output Timing Generator feature. 
*/ @@ -381,6 +510,7 @@ struct timing_generator_funcs { void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable); bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked); void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s); + void (*optc_read_reg_state)(struct timing_generator *tg, struct dcn_optc_reg_state *optc_reg_state); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_service.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h index 1e34e84160aa..6f94e48a24d1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_service.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h @@ -292,12 +292,12 @@ struct link_service { enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_data); bool (*edp_set_coasting_vtotal)( - struct dc_link *link, uint32_t coasting_vtotal); + struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, const enum pr_residency_mode mode); bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, - const unsigned int *power_opts, uint32_t coasting_vtotal); + const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number); bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 4e26a16a8743..79746d931471 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -49,6 +49,7 @@ struct resource_caps { int num_video_plane; int num_audio; int num_stream_encoder; + int num_analog_stream_encoder; int num_pll; int num_dwb; int num_ddc; diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 2676ae9f6fe8..1045c268672e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -877,7 +877,7 @@ bool dp_set_test_pattern( return false; if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (should_use_dmub_lock(pipe_ctx->stream->link)) { + if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -925,7 +925,7 @@ bool dp_set_test_pattern( CRTC_STATE_VACTIVE); if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) { - if (should_use_dmub_lock(pipe_ctx->stream->link)) { + if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index 892907991f91..befa67b2b2ae 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -58,8 +58,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx) return; } - link_enc->funcs->connect_dig_be_to_fe(link_enc, - pipe_ctx->stream_res.stream_enc->id, true); + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + link_enc->funcs->connect_dig_be_to_fe(link_enc, + pipe_ctx->stream_res.stream_enc->id, true); if (dc_is_dp_signal(pipe_ctx->stream->signal)) 
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); @@ -98,10 +99,13 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx) if (stream_enc->funcs->enable_stream) stream_enc->funcs->enable_stream(stream_enc, pipe_ctx->stream->signal, false); - link_enc->funcs->connect_dig_be_to_fe( - link_enc, - pipe_ctx->stream_res.stream_enc->id, - false); + + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + link_enc->funcs->connect_dig_be_to_fe( + link_enc, + pipe_ctx->stream_res.stream_enc->id, + false); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence( pipe_ctx->stream->link, @@ -115,7 +119,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; - if (!dc_is_virtual_signal(stream->signal)) + if (!dc_is_virtual_signal(stream->signal) && + !dc_is_rgb_signal(stream->signal)) stream_encoder->funcs->setup_stereo_sync( stream_encoder, pipe_ctx->stream_res.tg->inst, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 85303167a553..6d31f4967f1a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -270,6 +270,10 @@ static void read_scdc_caps(struct ddc_service *ddc_service, uint8_t slave_address = HDMI_SCDC_ADDRESS; uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI; + if (ddc_service->link->local_sink && + !ddc_service->link->local_sink->edid_caps.scdc_present) + return; + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte, sizeof(sink->scdc_caps.manufacturer_OUI.byte)); @@ -858,6 +862,94 @@ static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, verify_link_capability_non_destructive(link); } +/** + * link_detect_evaluate_edid_header() - Evaluate if an EDID header is acceptable. + * + * Evaluates an 8-byte EDID header to check if it's good enough + * for the purpose of determining whether a display is connected + * without reading the full EDID. + * + * @edid_header: The first 8 bytes of the EDID read from DDC. + * + * Return: true if the header looks valid (>= 6 of 8 bytes match the + * expected 00/FF pattern), false otherwise. + */ +static bool link_detect_evaluate_edid_header(uint8_t edid_header[8]) +{ + int edid_header_score = 0; + int i; + + for (i = 0; i < 8; ++i) + edid_header_score += edid_header[i] == ((i == 0 || i == 7) ? 0x00 : 0xff); + + return edid_header_score >= 6; +} + +/** + * link_detect_ddc_probe() - Probe the DDC to see if a display is connected. + * + * Detect whether a display is connected to DDC without reading full EDID. + * Reads only the EDID header (the first 8 bytes of EDID) from DDC and + * evaluates whether that matches. + * + * @link: DC link whose DDC/I2C is probed for the EDID header. + * + * Return: true if the EDID header was read and passes validation, + * false otherwise. + */ +static bool link_detect_ddc_probe(struct dc_link *link) +{ + if (!link->ddc) + return false; + + uint8_t edid_header[8] = {0}; + bool ddc_probed = i2c_read(link->ddc, 0x50, edid_header, sizeof(edid_header)); + + if (!ddc_probed) + return false; + + if (!link_detect_evaluate_edid_header(edid_header)) + return false; + + return true; +} + +/** + * link_detect_dac_load_detect() - Performs DAC load detection. 
+ * + * Load detection can be used to detect the presence of an + * analog display when we can't read DDC. This causes a visible + * visual glitch so it should be used sparingly. + * + * @link: DC link to test using the DAC load-detect path. + * + * Return: true if the VBIOS load-detect call reports OK, false + * otherwise. + */ +static bool link_detect_dac_load_detect(struct dc_link *link) +{ + struct dc_bios *bios = link->ctx->dc_bios; + struct link_encoder *link_enc = link->link_enc; + enum engine_id engine_id = link_enc->preferred_engine; + enum dal_device_type device_type = DEVICE_TYPE_CRT; + enum bp_result bp_result; + uint32_t enum_id; + + switch (engine_id) { + case ENGINE_ID_DACB: + enum_id = 2; + break; + case ENGINE_ID_DACA: + default: + engine_id = ENGINE_ID_DACA; + enum_id = 1; + break; + } + + bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id); + return bp_result == BP_RESULT_OK; +} + /* * detect_link_and_local_sink() - Detect if a sink is attached to a given link * @@ -942,6 +1034,12 @@ static bool detect_link_and_local_sink(struct dc_link *link, break; } + case SIGNAL_TYPE_RGB: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_RGB; + break; + } + case SIGNAL_TYPE_LVDS: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_LVDS; @@ -1066,7 +1164,30 @@ static bool detect_link_and_local_sink(struct dc_link *link, DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); break; case EDID_NO_RESPONSE: + /* Analog connectors without EDID: + * - old monitor that actually doesn't have EDID + * - cheap DVI-A cable or adapter that doesn't connect DDC + */ + if (dc_connector_supports_analog(link->link_id.id)) { + /* If we didn't do DAC load detection yet, do it now + * to verify there really is a display connected. + */ + if (link->type != dc_connection_dac_load && + !link_detect_dac_load_detect(link)) { + if (prev_sink) + dc_sink_release(prev_sink); + link_disconnect_sink(link); + return false; + } + + DC_LOG_INFO("%s detected analog display without EDID\n", __func__); + link->type = dc_connection_dac_load; + sink->edid_caps.analog = true; + break; + } + DC_LOG_ERROR("No EDID read.\n"); + /* * Abort detection for non-DP connectors if we have * no EDID @@ -1133,14 +1254,23 @@ static bool detect_link_and_local_sink(struct dc_link *link, sink = prev_sink; prev_sink = NULL; } - query_hdcp_capability(sink->sink_signal, link); + + if (!sink->edid_caps.analog) + query_hdcp_capability(sink->sink_signal, link); } + /* DVI-I connector connected to analog display. */ + if ((link->link_id.id == CONNECTOR_ID_DUAL_LINK_DVII || + link->link_id.id == CONNECTOR_ID_SINGLE_LINK_DVII) && + sink->edid_caps.analog) + sink->sink_signal = SIGNAL_TYPE_RGB; + /* HDMI-DVI Dongle */ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && !sink->edid_caps.edid_hdmi) sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; else if (dc_is_dvi_signal(sink->sink_signal) && + dc_is_dvi_signal(link->connector_signal) && aud_support->hdmi_audio_native && sink->edid_caps.edid_hdmi) sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; @@ -1232,6 +1362,36 @@ static bool detect_link_and_local_sink(struct dc_link *link, return true; } +/** + * link_detect_analog() - Determines if an analog sink is connected. + * + * @link: DC link to evaluate (must support analog signalling). 
+ * @type: Updated with the detected connection type: + * dc_connection_single (analog via DDC), + * dc_connection_dac_load (via load-detect), + * or dc_connection_none. + * + * Return: true if detection completed. + */ +static bool link_detect_analog(struct dc_link *link, enum dc_connection_type *type) +{ + /* Don't care about connectors that don't support an analog signal. */ + ASSERT(dc_connector_supports_analog(link->link_id.id)); + + if (link_detect_ddc_probe(link)) { + *type = dc_connection_single; + return true; + } + + if (link_detect_dac_load_detect(link)) { + *type = dc_connection_dac_load; + return true; + } + + *type = dc_connection_none; + return true; +} + /* * link_detect_connection_type() - Determine if there is a sink connected * @@ -1248,6 +1408,17 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type * return true; } + /* Ignore the HPD pin (if any) for analog connectors. + * Instead rely on DDC and DAC. + * + * - VGA connectors don't have any HPD at all. + * - Some DVI-A cables don't connect the HPD pin. + * - Some DVI-A cables pull up the HPD pin. + * (So it's high even when no display is connected.) + */ + if (dc_connector_supports_analog(link->link_id.id)) + return link_detect_analog(link, type); + if (link->connector_signal == SIGNAL_TYPE_EDP) { /*in case it is not on*/ if (!link->dc->config.edp_no_power_sequencing) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 83419e1a9036..6ae134147617 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -841,6 +841,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (should_use_dto_dscclk) dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); @@ -970,6 +971,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi dsc_cfg.color_depth = stream->timing.display_color_depth; dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); @@ -2224,7 +2226,11 @@ static enum dc_status enable_link( { enum dc_status status = DC_ERROR_UNEXPECTED; struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; + struct dc_link *link = NULL; + + if (stream == NULL) + return DC_ERROR_UNEXPECTED; + link = stream->link; /* There's some scenarios where driver is unloaded with display * still enabled. 
When driver is reloaded, it may cause a display @@ -2256,6 +2262,9 @@ static enum dc_status enable_link( enable_link_lvds(pipe_ctx); status = DC_OK; break; + case SIGNAL_TYPE_RGB: + status = DC_OK; + break; case SIGNAL_TYPE_VIRTUAL: status = enable_link_virtual(pipe_ctx); break; @@ -2458,6 +2467,7 @@ void link_set_dpms_on( struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc; enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); bool apply_edp_fast_boot_optimization = pipe_ctx->stream->apply_edp_fast_boot_optimization; @@ -2502,6 +2512,8 @@ void link_set_dpms_on( pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); } + link_hwss->setup_stream_attribute(pipe_ctx); + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; // Enable VPG before building infoframe diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 31a73867cd4c..a6e2b0821969 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -451,6 +451,46 @@ static enum channel_id get_ddc_line(struct dc_link *link) return channel; } +static enum engine_id find_analog_engine(struct dc_link *link) +{ + struct dc_bios *bp = link->ctx->dc_bios; + struct graphics_object_id encoder = {0}; + enum bp_result bp_result = BP_RESULT_OK; + int i; + + for (i = 0; i < 3; i++) { + bp_result = bp->funcs->get_src_obj(bp, link->link_id, i, &encoder); + + if (bp_result != BP_RESULT_OK) + return ENGINE_ID_UNKNOWN; + + switch (encoder.id) { + case ENCODER_ID_INTERNAL_DAC1: + case ENCODER_ID_INTERNAL_KLDSCP_DAC1: + return ENGINE_ID_DACA; + case ENCODER_ID_INTERNAL_DAC2: + case ENCODER_ID_INTERNAL_KLDSCP_DAC2: + return ENGINE_ID_DACB; + } + } + + return ENGINE_ID_UNKNOWN; +} + +static bool transmitter_supported(const enum transmitter transmitter) +{ + return transmitter != TRANSMITTER_UNKNOWN && + transmitter != TRANSMITTER_NUTMEG_CRT && + transmitter != TRANSMITTER_TRAVIS_CRT && + transmitter != TRANSMITTER_TRAVIS_LCD; +} + +static bool analog_engine_supported(const enum engine_id engine_id) +{ + return engine_id == ENGINE_ID_DACA || + engine_id == ENGINE_ID_DACB; +} + static bool construct_phy(struct dc_link *link, const struct link_init_data *init_params) { @@ -482,10 +522,23 @@ static bool construct_phy(struct dc_link *link, link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index); + /* Determine early if the link has any supported encoders, + * so that we avoid initializing DDC and HPD, etc. 
+ */ + bp_funcs->get_src_obj(bios, link->link_id, 0, &enc_init_data.encoder); + enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder); + enc_init_data.analog_engine = find_analog_engine(link); + link->ep_type = DISPLAY_ENDPOINT_PHY; DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); + if (!transmitter_supported(enc_init_data.transmitter) && + !analog_engine_supported(enc_init_data.analog_engine)) { + DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id); + goto unsupported_fail; + } + if (bios->funcs->get_disp_connector_caps_info) { bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info); link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY; @@ -530,6 +583,9 @@ static bool construct_phy(struct dc_link *link, case CONNECTOR_ID_DUAL_LINK_DVII: link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; break; + case CONNECTOR_ID_VGA: + link->connector_signal = SIGNAL_TYPE_RGB; + break; case CONNECTOR_ID_DISPLAY_PORT: case CONNECTOR_ID_MXM: case CONNECTOR_ID_USBC: @@ -611,16 +667,12 @@ static bool construct_phy(struct dc_link *link, dal_ddc_get_line(get_ddc_pin(link->ddc)); enc_init_data.ctx = dc_ctx; - bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, - &enc_init_data.encoder); enc_init_data.connector = link->link_id; enc_init_data.channel = get_ddc_line(link); enc_init_data.hpd_source = get_hpd_line(link); link->hpd_src = enc_init_data.hpd_source; - enc_init_data.transmitter = - translate_encoder_to_transmitter(enc_init_data.encoder); link->link_enc = link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); @@ -735,6 +787,7 @@ static bool construct_phy(struct dc_link *link, link->psr_settings.psr_vtotal_control_support = false; link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED; DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__); return true; @@ -753,6 +806,7 @@ create_fail: link->hpd_gpio = NULL; } +unsupported_fail: DC_LOG_DC("BIOS object table - %s failed.\n", __func__); return false; } @@ -816,9 +870,7 @@ static bool construct_dpia(struct dc_link *link, /* TODO: Create link encoder */ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - - /* Some docks seem to NAK I2C writes to segment pointer with mot=0. 
*/ - link->wa_flags.dp_mot_reset_segment = true; + link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED; return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index 267180e7bc48..5d2bcce2f669 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -549,7 +549,8 @@ void write_scdc_data(struct ddc_service *ddc_service, /*Lower than 340 Scramble bit from SCDC caps*/ if (ddc_service->link->local_sink && - ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) + (ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite || + !ddc_service->link->local_sink->edid_caps.scdc_present)) return; link_query_ddc_data(ddc_service, slave_address, &offset, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index b12c11bd6a14..ad90a0106938 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -357,7 +357,9 @@ bool dp_should_enable_fec(const struct dc_link *link) { bool force_disable = false; - if (link->fec_state == dc_link_fec_enabled) + if (link->dc->debug.disable_fec) + force_disable = true; + else if (link->fec_state == dc_link_fec_enabled) force_disable = false; else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && link->local_sink && @@ -424,6 +426,21 @@ static enum dc_link_rate get_link_rate_from_max_link_bw( return link_rate; } +static enum dc_lane_count get_lttpr_max_lane_count(struct dc_link *link) +{ + enum dc_lane_count lttpr_max_lane_count = LANE_COUNT_UNKNOWN; + + if (link->dpcd_caps.lttpr_caps.max_lane_count <= LANE_COUNT_DP_MAX) + lttpr_max_lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + + /* if bw_allocation is enabled and nrd_max_lane_count is set, use it */ + if (link->dpia_bw_alloc_config.bw_alloc_enabled && + link->dpia_bw_alloc_config.nrd_max_lane_count > 0) + lttpr_max_lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count; + + return lttpr_max_lane_count; +} + static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) { @@ -438,6 +455,11 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) break; } + /* if bw_allocation is enabled and nrd_max_link_rate is set, use it */ + if (link->dpia_bw_alloc_config.bw_alloc_enabled && + link->dpia_bw_alloc_config.nrd_max_link_rate > 0) + lttpr_max_link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate; + if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) lttpr_max_link_rate = LINK_RATE_UHBR20; else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) @@ -1691,7 +1713,7 @@ static bool retrieve_link_cap(struct dc_link *link) union edp_configuration_cap edp_config_cap; union dp_downstream_port_present ds_port = { 0 }; enum dc_status status = DC_ERROR_UNEXPECTED; - uint32_t read_dpcd_retry_cnt = 3; + uint32_t read_dpcd_retry_cnt = 20; int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; const uint32_t post_oui_delay = 30; // 30ms @@ -1734,12 +1756,13 @@ static bool retrieve_link_cap(struct dc_link *link) } dpcd_set_source_specific_data(link); - /* Sink may need to configure internals based on vendor, so allow some - * time before proceeding with possibly vendor specific transactions - */ - msleep(post_oui_delay); for (i = 0; i < 
read_dpcd_retry_cnt; i++) { + /* + * Sink may need to configure internals based on vendor, so allow some + * time before proceeding with possibly vendor specific transactions + */ + msleep(post_oui_delay); status = core_link_read_dpcd( link, DP_DPCD_REV, @@ -1845,6 +1868,12 @@ static bool retrieve_link_cap(struct dc_link *link) link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable)); + /* Some MST docks seem to NAK I2C writes to segment pointer with mot=0. */ + if (link->dpcd_caps.is_mst_capable) + link->wa_flags.dp_mot_reset_segment = true; + else + link->wa_flags.dp_mot_reset_segment = false; + get_active_converter_info(ds_port.byte, link); dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); @@ -2063,6 +2092,11 @@ static bool retrieve_link_cap(struct dc_link *link) link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw, sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw)); + core_link_read_dpcd(link, + DP_PANEL_REPLAY_CAPABILITY_SUPPORT, + &link->dpcd_caps.pr_caps_supported.raw, + sizeof(link->dpcd_caps.pr_caps_supported.raw)); + /* Read DP tunneling information. */ status = dpcd_get_tunneling_device_data(link); if (status != DC_OK) @@ -2241,6 +2275,7 @@ const struct dc_link_settings *dp_get_verified_link_cap( struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; + enum dc_lane_count lttpr_max_lane_count; enum dc_link_rate lttpr_max_link_rate; enum dc_link_rate cable_max_link_rate; struct resource_context *res_ctx = &link->dc->current_state->res_ctx; @@ -2305,8 +2340,11 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) /* Some LTTPR devices do not report valid DPCD revisions, if so, do not take it's link cap into consideration. */ if (link->dpcd_caps.lttpr_caps.revision.raw >= DPCD_REV_14) { - if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) - max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + lttpr_max_lane_count = get_lttpr_max_lane_count(link); + + if (lttpr_max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = lttpr_max_lane_count; + lttpr_max_link_rate = get_lttpr_max_link_rate(link); if (lttpr_max_link_rate < max_link_cap.link_rate) @@ -2412,6 +2450,11 @@ bool dp_verify_link_cap_with_retries( dp_trace_detect_lt_init(link); + DC_LOG_HW_LINK_TRAINING("%s: Link[%d] LinkRate=0x%x LaneCount=%d", + __func__, link->link_index, + known_limit_link_setting->link_rate, + known_limit_link_setting->lane_count); + if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && link->dc->debug.usbc_combo_phy_reset_wa) apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); @@ -2448,6 +2491,11 @@ bool dp_verify_link_cap_with_retries( dp_trace_lt_fail_count_update(link, fail_count, true); dp_trace_set_lt_end_timestamp(link, true); + DC_LOG_HW_LINK_TRAINING("%s: Link[%d] Exit. 
is_success=%d fail_count=%d", + __func__, link->link_index, + success, + fail_count); + return success; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 8a3c18ae97a7..c958d3f600c8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -225,11 +225,6 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) bool ret = false; uint8_t val; - if (link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode == false) { - DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index); - return false; - } - val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ; if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) { @@ -273,17 +268,28 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) { DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)", __func__, link->link_index); - } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) { + } + + if (status & DP_TUNNELING_BW_REQUEST_FAILED) { DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw); - } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { + } + + if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) { + link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link); + + DC_LOG_DEBUG("%s: Granularity changed on link(%d) new granularity=%d", + __func__, link->link_index, link->dpia_bw_alloc_config.bw_granularity); + } + + if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 693477413347..4b01ab0a5a7f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -398,10 +398,12 @@ bool dp_should_allow_hpd_rx_irq(const struct dc_link *link) * Don't handle RX IRQ unless one of following is met: * 1) The link is established (cur_link_settings != unknown) * 2) We know we're dealing with a branch device, SST or MST + * 3) The link is bw_alloc enabled. 
*/ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || - is_dp_branch_device(link)) + is_dp_branch_device(link) || + link->dpia_bw_alloc_config.bw_alloc_enabled) return true; return false; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 5e806edbb9f6..c56e69eb27ef 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -949,7 +949,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, /* Set power optimization flag */ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) { if (replay != NULL && link->replay_settings.replay_feature_enabled && - replay->funcs->replay_set_power_opt) { + replay->funcs->replay_set_power_opt) { replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst); link->replay_settings.replay_power_opt_active = *power_opts; } @@ -984,7 +984,117 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state) return true; } -bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) +static bool edp_setup_panel_replay(struct dc_link *link, const struct dc_stream_state *stream) +{ + /* To-do: Setup Replay */ + struct dc *dc; + struct dmub_replay *replay; + int i; + unsigned int panel_inst; + struct replay_context replay_context = { 0 }; + unsigned int lineTimeInNs = 0; + + union panel_replay_enable_and_configuration_1 pr_config_1 = { 0 }; + union panel_replay_enable_and_configuration_2 pr_config_2 = { 0 }; + + union dpcd_alpm_configuration alpm_config; + + replay_context.controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + //Clear Panel Replay enable & config + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1, + (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t)); + + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2, + (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t)); + + if (!(link->replay_settings.config.replay_supported)) + return false; + + dc = link->ctx->dc; + + //not sure should keep or not + replay = dc->res_pool->replay; + + if (!replay) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel; + replay_context.digbe_inst = link->link_enc->transmitter; + replay_context.digfe_inst = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + replay_context.controllerId = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + lineTimeInNs = + ((stream->timing.h_total * 1000000) / + (stream->timing.pix_clk_100hz / 10)) + 1; + + replay_context.line_time_in_ns = lineTimeInNs; + + link->replay_settings.replay_feature_enabled = + replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst); + + if (link->replay_settings.replay_feature_enabled) { + pr_config_1.bits.PANEL_REPLAY_ENABLE = 1; + pr_config_1.bits.PANEL_REPLAY_CRC_ENABLE = 1; + pr_config_1.bits.IRQ_HPD_ASSDP_MISSING = 1; + pr_config_1.bits.IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR = 1; + pr_config_1.bits.IRQ_HPD_RFB_ERROR = 1; + pr_config_1.bits.IRQ_HPD_ACTIVE_FRAME_CRC_ERROR = 1; + 
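+		/* In addition to the basic enable, CRC checking and the IRQ_HPD
+		 * error notifications requested above, also ask the sink for
+		 * selective update and early transport.
+		 */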
pr_config_1.bits.PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE = 1; + pr_config_1.bits.PANEL_REPLAY_EARLY_TRANSPORT_ENABLE = 1; + + pr_config_2.bits.SINK_REFRESH_RATE_UNLOCK_GRANTED = 0; + pr_config_2.bits.SU_Y_GRANULARITY_EXT_VALUE_ENABLED = 0; + pr_config_2.bits.SU_REGION_SCAN_LINE_CAPTURE_INDICATION = 0; + + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1, + (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t)); + + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2, + (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t)); + + //ALPM Setup + memset(&alpm_config, 0, sizeof(alpm_config)); + alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0; + + if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) { + alpm_config.bits.ALPM_MODE_SEL = 1; + alpm_config.bits.ACDS_PERIOD_DURATION = 1; + } + + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_RECEIVER_ALPM_CONFIG, + &alpm_config.raw, + sizeof(alpm_config.raw)); + } + + return true; +} + +static bool edp_setup_freesync_replay(struct dc_link *link, const struct dc_stream_state *stream) { /* To-do: Setup Replay */ struct dc *dc; @@ -1080,6 +1190,18 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream return true; } +bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) +{ + if (!link) + return false; + if (link->replay_settings.config.replay_version == DC_VESA_PANEL_REPLAY) + return edp_setup_panel_replay(link, stream); + else if (link->replay_settings.config.replay_version == DC_FREESYNC_REPLAY) + return edp_setup_freesync_replay(link, stream); + else + return false; +} + /* * This is general Interface for Replay to set an 32 bit variable to dmub * replay_FW_Message_type: Indicates which instruction or variable pass to DMUB @@ -1110,7 +1232,7 @@ bool edp_send_replay_cmd(struct dc_link *link, return true; } -bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1122,9 +1244,11 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; - if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { - replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst); + if (coasting_vtotal && (link->replay_settings.coasting_vtotal != coasting_vtotal || + link->replay_settings.frame_skip_number != frame_skip_number)) { + replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst, frame_skip_number); link->replay_settings.coasting_vtotal = coasting_vtotal; + link->replay_settings.frame_skip_number = frame_skip_number; } return true; @@ -1152,7 +1276,7 @@ bool edp_replay_residency(const struct dc_link *link, } bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, - const unsigned int *power_opts, uint32_t coasting_vtotal) + const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1163,13 +1287,16 @@ bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, /* Only both power and coasting vtotal changed, this func could return true */ if (power_opts && 
link->replay_settings.replay_power_opt_active != *power_opts && - coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { + (coasting_vtotal && + (link->replay_settings.coasting_vtotal != coasting_vtotal || + link->replay_settings.frame_skip_number != frame_skip_number))) { if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt_and_coasting_vtotal) { replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay, - *power_opts, panel_inst, coasting_vtotal); + *power_opts, panel_inst, coasting_vtotal, frame_skip_number); link->replay_settings.replay_power_opt_active = *power_opts; link->replay_settings.coasting_vtotal = coasting_vtotal; + link->replay_settings.frame_skip_number = frame_skip_number; } else return false; } else diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 62a6344e613e..dd79c7cd2828 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link, bool edp_send_replay_cmd(struct dc_link *link, enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_data); -bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number); bool edp_replay_residency(const struct dc_link *link, unsigned int *residency, const bool is_start, const enum pr_residency_mode mode); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, - const unsigned int *power_opts, uint32_t coasting_vtotal); + const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c index 85298b8a1b5e..6bfd2c1294e5 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c @@ -1514,6 +1514,21 @@ static void mpc3_read_mpcc_state( MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut); } +void mpc3_read_reg_state( + struct mpc *mpc, + int mpcc_inst, struct dcn_mpc_reg_state *mpc_reg_state) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + mpc_reg_state->mpcc_bot_sel = REG_READ(MPCC_BOT_SEL[mpcc_inst]); + mpc_reg_state->mpcc_control = REG_READ(MPCC_CONTROL[mpcc_inst]); + mpc_reg_state->mpcc_ogam_control = REG_READ(MPCC_OGAM_CONTROL[mpcc_inst]); + mpc_reg_state->mpcc_opp_id = REG_READ(MPCC_OPP_ID[mpcc_inst]); + mpc_reg_state->mpcc_status = REG_READ(MPCC_STATUS[mpcc_inst]); + mpc_reg_state->mpcc_top_sel = REG_READ(MPCC_TOP_SEL[mpcc_inst]); + +} + static const struct mpc_funcs dcn30_mpc_funcs = { .read_mpcc_state = mpc3_read_mpcc_state, .insert_plane = mpc1_insert_plane, @@ -1544,6 +1559,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = { .release_rmu = mpcc3_release_rmu, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h 
b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h index 103f29900a2c..e2f147d17178 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h @@ -1096,6 +1096,11 @@ void mpc3_power_on_ogam_lut( struct mpc *mpc, int mpcc_id, bool power_on); +void mpc3_read_reg_state( + struct mpc *mpc, + int mpcc_inst, + struct dcn_mpc_reg_state *mpc_reg_state); + void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst); enum dc_lut_mode mpc3_get_ogam_current( diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c index 6f0e017a8ae2..83bbbf34bcac 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c @@ -1020,6 +1020,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .release_rmu = NULL, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c index e1a0308dee57..eeac13fdd6f5 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c @@ -598,6 +598,7 @@ static const struct mpc_funcs dcn401_mpc_funcs = { .release_rmu = NULL, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, .set_movable_cm_location = mpc401_set_movable_cm_location, .update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c index 71e9288d60ed..45d418636d0c 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c @@ -372,6 +372,17 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval); } + +void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); +} + /*****************************************/ /* Constructor, Destructor */ /*****************************************/ @@ -392,7 +403,8 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_program_dpg_dimensions = NULL, .dpg_is_blanked = NULL, .dpg_is_pending = NULL, - .opp_destroy = opp1_destroy + .opp_destroy = opp1_destroy, + .opp_read_reg_state = opp1_read_reg_state }; void dcn10_opp_construct(struct dcn10_opp *oppn10, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h index c87de68a509e..38d0d530a9b7 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h @@ -63,7 +63,8 @@ uint32_t OPPBUF_CONTROL1; \ uint32_t OPPBUF_3D_PARAMETERS_0; \ uint32_t OPPBUF_3D_PARAMETERS_1; \ - uint32_t OPP_PIPE_CONTROL + uint32_t OPP_PIPE_CONTROL; \ + uint32_t OPP_PIPE_CRC_CONTROL #define OPP_MASK_SH_LIST_DCN(mask_sh) \ 
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \ @@ -153,7 +154,6 @@ struct dcn10_opp { const struct dcn10_opp_registers *regs; const struct dcn10_opp_shift *opp_shift; const struct dcn10_opp_mask *opp_mask; - bool is_write_to_ram_a_safe; }; @@ -188,4 +188,6 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable); void opp1_destroy(struct output_pixel_processor **opp); +void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c index f5fe0cac7cb0..ce826a5be4c7 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c @@ -377,6 +377,18 @@ uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp return 0; } +void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + + opp_reg_state->dpg_control = REG_READ(DPG_CONTROL); + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); + opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG); +} + /*****************************************/ /* Constructor, Destructor */ /*****************************************/ @@ -395,6 +407,7 @@ static struct opp_funcs dcn20_opp_funcs = { .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, .opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count, + .opp_read_reg_state = opp2_read_reg_state }; void dcn20_opp_construct(struct dcn20_opp *oppn20, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h index 34936e6c49f3..fb0c047c1788 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h @@ -59,7 +59,8 @@ uint32_t DPG_COLOUR_G_Y; \ uint32_t DPG_COLOUR_R_CR; \ uint32_t DPG_RAMP_CONTROL; \ - uint32_t DPG_STATUS + uint32_t DPG_STATUS; \ + uint32_t DSCRM_DSC_FORWARD_CONFIG #define OPP_DPG_MASK_SH_LIST(mask_sh) \ OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \ @@ -171,4 +172,7 @@ void opp2_program_left_edge_extra_pixel ( uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp, enum dc_pixel_encoding pixel_encoding, bool is_primary); + +void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c index 3542b51c9aac..e11c4e16402f 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c @@ -51,3 +51,16 @@ void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable) { REG_UPDATE(OPP_TOP_CLK_CONTROL, OPP_FGCG_REP_DIS, !enable); } + +void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + + opp_reg_state->dpg_control = REG_READ(DPG_CONTROL); + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_abm_control = REG_READ(OPP_ABM_CONTROL); + 
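+	/* OPP_ABM_CONTROL above is the register added for the DCN3.5 list;
+	 * the remaining reads mirror the common DCN2.0 OPP snapshot.
+	 */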
opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); + opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG); +} diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h index a9a413527801..c6cace90e8f2 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h @@ -31,7 +31,8 @@ #define OPP_REG_VARIABLE_LIST_DCN3_5 \ OPP_REG_VARIABLE_LIST_DCN2_0; \ - uint32_t OPP_TOP_CLK_CONTROL + uint32_t OPP_TOP_CLK_CONTROL; \ + uint32_t OPP_ABM_CONTROL #define OPP_MASK_SH_LIST_DCN35(mask_sh) \ OPP_MASK_SH_LIST_DCN20(mask_sh), \ @@ -64,4 +65,5 @@ void dcn35_opp_construct(struct dcn20_opp *oppn20, void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable); +void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); #endif diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index 8b2a8455eb56..803bcc25601c 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -209,7 +209,43 @@ uint32_t OPTC_WIDTH_CONTROL2; \ uint32_t OTG_PSTATE_REGISTER; \ uint32_t OTG_PIPE_UPDATE_STATUS; \ - uint32_t INTERRUPT_DEST + uint32_t INTERRUPT_DEST; \ + uint32_t OPTC_INPUT_SPARE_REGISTER; \ + uint32_t OPTC_RSMU_UNDERFLOW; \ + uint32_t OPTC_UNDERFLOW_THRESHOLD; \ + uint32_t OTG_COUNT_CONTROL; \ + uint32_t OTG_COUNT_RESET; \ + uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \ + uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \ + uint32_t OTG_DLPC_CONTROL; \ + uint32_t OTG_DRR_CONTROL2; \ + uint32_t OTG_DRR_TIMING_INT_STATUS; \ + uint32_t OTG_GLOBAL_CONTROL3; \ + uint32_t OTG_GLOBAL_SYNC_STATUS; \ + uint32_t OTG_GSL_VSYNC_GAP; \ + uint32_t OTG_INTERLACE_STATUS; \ + uint32_t OTG_INTERRUPT_CONTROL; \ + uint32_t OTG_LONG_VBLANK_STATUS; \ + uint32_t OTG_MANUAL_FORCE_VSYNC_NEXT_LINE; \ + uint32_t OTG_MASTER_EN; \ + uint32_t OTG_PIXEL_DATA_READBACK0; \ + uint32_t OTG_PIXEL_DATA_READBACK1; \ + uint32_t OTG_REQUEST_CONTROL; \ + uint32_t OTG_SNAPSHOT_CONTROL; \ + uint32_t OTG_SNAPSHOT_FRAME; \ + uint32_t OTG_SNAPSHOT_POSITION; \ + uint32_t OTG_SNAPSHOT_STATUS; \ + uint32_t OTG_SPARE_REGISTER; \ + uint32_t OTG_STATUS_HV_COUNT; \ + uint32_t OTG_STATUS_VF_COUNT; \ + uint32_t OTG_STEREO_FORCE_NEXT_EYE; \ + uint32_t OTG_TRIG_MANUAL_CONTROL; \ + uint32_t OTG_TRIGB_CNTL; \ + uint32_t OTG_TRIGB_MANUAL_TRIG; \ + uint32_t OTG_UPDATE_LOCK; \ + uint32_t OTG_V_TOTAL_INT_STATUS; \ + uint32_t OTG_VSYNC_NOM_INT_STATUS + struct dcn_optc_registers { OPTC_REG_VARIABLE_LIST_DCN; diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c index 4f1830ba619f..c6417538090f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c @@ -315,6 +315,136 @@ void optc31_read_otg_state(struct timing_generator *optc, s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); } +void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + optc_reg_state->optc_bytes_per_pixel = REG_READ(OPTC_BYTES_PER_PIXEL); + optc_reg_state->optc_data_format_control = 
REG_READ(OPTC_DATA_FORMAT_CONTROL); + optc_reg_state->optc_data_source_select = REG_READ(OPTC_DATA_SOURCE_SELECT); + optc_reg_state->optc_input_clock_control = REG_READ(OPTC_INPUT_CLOCK_CONTROL); + optc_reg_state->optc_input_global_control = REG_READ(OPTC_INPUT_GLOBAL_CONTROL); + optc_reg_state->optc_input_spare_register = REG_READ(OPTC_INPUT_SPARE_REGISTER); + optc_reg_state->optc_memory_config = REG_READ(OPTC_MEMORY_CONFIG); + optc_reg_state->optc_rsmu_underflow = REG_READ(OPTC_RSMU_UNDERFLOW); + optc_reg_state->optc_underflow_threshold = REG_READ(OPTC_UNDERFLOW_THRESHOLD); + optc_reg_state->optc_width_control = REG_READ(OPTC_WIDTH_CONTROL); + optc_reg_state->otg_3d_structure_control = REG_READ(OTG_3D_STRUCTURE_CONTROL); + optc_reg_state->otg_clock_control = REG_READ(OTG_CLOCK_CONTROL); + optc_reg_state->otg_control = REG_READ(OTG_CONTROL); + optc_reg_state->otg_count_control = REG_READ(OTG_COUNT_CONTROL); + optc_reg_state->otg_count_reset = REG_READ(OTG_COUNT_RESET); + optc_reg_state->otg_crc_cntl = REG_READ(OTG_CRC_CNTL); + optc_reg_state->otg_crc_sig_blue_control_mask = REG_READ(OTG_CRC_SIG_BLUE_CONTROL_MASK); + optc_reg_state->otg_crc_sig_red_green_mask = REG_READ(OTG_CRC_SIG_RED_GREEN_MASK); + optc_reg_state->otg_crc0_data_b = REG_READ(OTG_CRC0_DATA_B); + optc_reg_state->otg_crc0_data_rg = REG_READ(OTG_CRC0_DATA_RG); + optc_reg_state->otg_crc0_windowa_x_control = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL); + optc_reg_state->otg_crc0_windowa_x_control_readback = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowa_y_control = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL); + optc_reg_state->otg_crc0_windowa_y_control_readback = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowb_x_control = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL); + optc_reg_state->otg_crc0_windowb_x_control_readback = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowb_y_control = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL); + optc_reg_state->otg_crc0_windowb_y_control_readback = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK); + optc_reg_state->otg_crc1_data_b = REG_READ(OTG_CRC1_DATA_B); + optc_reg_state->otg_crc1_data_rg = REG_READ(OTG_CRC1_DATA_RG); + optc_reg_state->otg_crc1_windowa_x_control = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL); + optc_reg_state->otg_crc1_windowa_x_control_readback = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowa_y_control = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL); + optc_reg_state->otg_crc1_windowa_y_control_readback = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowb_x_control = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL); + optc_reg_state->otg_crc1_windowb_x_control_readback = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowb_y_control = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL); + optc_reg_state->otg_crc1_windowb_y_control_readback = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK); + optc_reg_state->otg_crc2_data_b = REG_READ(OTG_CRC2_DATA_B); + optc_reg_state->otg_crc2_data_rg = REG_READ(OTG_CRC2_DATA_RG); + optc_reg_state->otg_crc3_data_b = REG_READ(OTG_CRC3_DATA_B); + optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG); + optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL); + optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); + optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2); + optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL); + optc_reg_state->otg_drr_timing_int_status 
= REG_READ(OTG_DRR_TIMING_INT_STATUS); + optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW); + optc_reg_state->otg_drr_v_total_change = REG_READ(OTG_DRR_V_TOTAL_CHANGE); + optc_reg_state->otg_dsc_start_position = REG_READ(OTG_DSC_START_POSITION); + optc_reg_state->otg_force_count_now_cntl = REG_READ(OTG_FORCE_COUNT_NOW_CNTL); + optc_reg_state->otg_global_control0 = REG_READ(OTG_GLOBAL_CONTROL0); + optc_reg_state->otg_global_control1 = REG_READ(OTG_GLOBAL_CONTROL1); + optc_reg_state->otg_global_control2 = REG_READ(OTG_GLOBAL_CONTROL2); + optc_reg_state->otg_global_control3 = REG_READ(OTG_GLOBAL_CONTROL3); + optc_reg_state->otg_global_control4 = REG_READ(OTG_GLOBAL_CONTROL4); + optc_reg_state->otg_global_sync_status = REG_READ(OTG_GLOBAL_SYNC_STATUS); + optc_reg_state->otg_gsl_control = REG_READ(OTG_GSL_CONTROL); + optc_reg_state->otg_gsl_vsync_gap = REG_READ(OTG_GSL_VSYNC_GAP); + optc_reg_state->otg_gsl_window_x = REG_READ(OTG_GSL_WINDOW_X); + optc_reg_state->otg_gsl_window_y = REG_READ(OTG_GSL_WINDOW_Y); + optc_reg_state->otg_h_blank_start_end = REG_READ(OTG_H_BLANK_START_END); + optc_reg_state->otg_h_sync_a = REG_READ(OTG_H_SYNC_A); + optc_reg_state->otg_h_sync_a_cntl = REG_READ(OTG_H_SYNC_A_CNTL); + optc_reg_state->otg_h_timing_cntl = REG_READ(OTG_H_TIMING_CNTL); + optc_reg_state->otg_h_total = REG_READ(OTG_H_TOTAL); + optc_reg_state->otg_interlace_control = REG_READ(OTG_INTERLACE_CONTROL); + optc_reg_state->otg_interlace_status = REG_READ(OTG_INTERLACE_STATUS); + optc_reg_state->otg_interrupt_control = REG_READ(OTG_INTERRUPT_CONTROL); + optc_reg_state->otg_long_vblank_status = REG_READ(OTG_LONG_VBLANK_STATUS); + optc_reg_state->otg_m_const_dto0 = REG_READ(OTG_M_CONST_DTO0); + optc_reg_state->otg_m_const_dto1 = REG_READ(OTG_M_CONST_DTO1); + optc_reg_state->otg_manual_force_vsync_next_line = REG_READ(OTG_MANUAL_FORCE_VSYNC_NEXT_LINE); + optc_reg_state->otg_master_en = REG_READ(OTG_MASTER_EN); + optc_reg_state->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK); + optc_reg_state->otg_master_update_mode = REG_READ(OTG_MASTER_UPDATE_MODE); + optc_reg_state->otg_nom_vert_position = REG_READ(OTG_NOM_VERT_POSITION); + optc_reg_state->otg_pipe_update_status = REG_READ(OTG_PIPE_UPDATE_STATUS); + optc_reg_state->otg_pixel_data_readback0 = REG_READ(OTG_PIXEL_DATA_READBACK0); + optc_reg_state->otg_pixel_data_readback1 = REG_READ(OTG_PIXEL_DATA_READBACK1); + optc_reg_state->otg_request_control = REG_READ(OTG_REQUEST_CONTROL); + optc_reg_state->otg_snapshot_control = REG_READ(OTG_SNAPSHOT_CONTROL); + optc_reg_state->otg_snapshot_frame = REG_READ(OTG_SNAPSHOT_FRAME); + optc_reg_state->otg_snapshot_position = REG_READ(OTG_SNAPSHOT_POSITION); + optc_reg_state->otg_snapshot_status = REG_READ(OTG_SNAPSHOT_STATUS); + optc_reg_state->otg_spare_register = REG_READ(OTG_SPARE_REGISTER); + optc_reg_state->otg_static_screen_control = REG_READ(OTG_STATIC_SCREEN_CONTROL); + optc_reg_state->otg_status = REG_READ(OTG_STATUS); + optc_reg_state->otg_status_frame_count = REG_READ(OTG_STATUS_FRAME_COUNT); + optc_reg_state->otg_status_hv_count = REG_READ(OTG_STATUS_HV_COUNT); + optc_reg_state->otg_status_position = REG_READ(OTG_STATUS_POSITION); + optc_reg_state->otg_status_vf_count = REG_READ(OTG_STATUS_VF_COUNT); + optc_reg_state->otg_stereo_control = REG_READ(OTG_STEREO_CONTROL); + optc_reg_state->otg_stereo_force_next_eye = REG_READ(OTG_STEREO_FORCE_NEXT_EYE); + optc_reg_state->otg_stereo_status = REG_READ(OTG_STEREO_STATUS); + optc_reg_state->otg_trig_manual_control = 
REG_READ(OTG_TRIG_MANUAL_CONTROL); + optc_reg_state->otg_triga_cntl = REG_READ(OTG_TRIGA_CNTL); + optc_reg_state->otg_triga_manual_trig = REG_READ(OTG_TRIGA_MANUAL_TRIG); + optc_reg_state->otg_trigb_cntl = REG_READ(OTG_TRIGB_CNTL); + optc_reg_state->otg_trigb_manual_trig = REG_READ(OTG_TRIGB_MANUAL_TRIG); + optc_reg_state->otg_update_lock = REG_READ(OTG_UPDATE_LOCK); + optc_reg_state->otg_v_blank_start_end = REG_READ(OTG_V_BLANK_START_END); + optc_reg_state->otg_v_count_stop_control = REG_READ(OTG_V_COUNT_STOP_CONTROL); + optc_reg_state->otg_v_count_stop_control2 = REG_READ(OTG_V_COUNT_STOP_CONTROL2); + optc_reg_state->otg_v_sync_a = REG_READ(OTG_V_SYNC_A); + optc_reg_state->otg_v_sync_a_cntl = REG_READ(OTG_V_SYNC_A_CNTL); + optc_reg_state->otg_v_total = REG_READ(OTG_V_TOTAL); + optc_reg_state->otg_v_total_control = REG_READ(OTG_V_TOTAL_CONTROL); + optc_reg_state->otg_v_total_int_status = REG_READ(OTG_V_TOTAL_INT_STATUS); + optc_reg_state->otg_v_total_max = REG_READ(OTG_V_TOTAL_MAX); + optc_reg_state->otg_v_total_mid = REG_READ(OTG_V_TOTAL_MID); + optc_reg_state->otg_v_total_min = REG_READ(OTG_V_TOTAL_MIN); + optc_reg_state->otg_vert_sync_control = REG_READ(OTG_VERT_SYNC_CONTROL); + optc_reg_state->otg_vertical_interrupt0_control = REG_READ(OTG_VERTICAL_INTERRUPT0_CONTROL); + optc_reg_state->otg_vertical_interrupt0_position = REG_READ(OTG_VERTICAL_INTERRUPT0_POSITION); + optc_reg_state->otg_vertical_interrupt1_control = REG_READ(OTG_VERTICAL_INTERRUPT1_CONTROL); + optc_reg_state->otg_vertical_interrupt1_position = REG_READ(OTG_VERTICAL_INTERRUPT1_POSITION); + optc_reg_state->otg_vertical_interrupt2_control = REG_READ(OTG_VERTICAL_INTERRUPT2_CONTROL); + optc_reg_state->otg_vertical_interrupt2_position = REG_READ(OTG_VERTICAL_INTERRUPT2_POSITION); + optc_reg_state->otg_vready_param = REG_READ(OTG_VREADY_PARAM); + optc_reg_state->otg_vstartup_param = REG_READ(OTG_VSTARTUP_PARAM); + optc_reg_state->otg_vsync_nom_int_status = REG_READ(OTG_VSYNC_NOM_INT_STATUS); + optc_reg_state->otg_vupdate_keepout = REG_READ(OTG_VUPDATE_KEEPOUT); + optc_reg_state->otg_vupdate_param = REG_READ(OTG_VUPDATE_PARAM); +} + static const struct timing_generator_funcs dcn31_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -377,6 +507,7 @@ static const struct timing_generator_funcs dcn31_tg_funcs = { .init_odm = optc3_init_odm, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn31_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h index 0f72c274f40b..98f7d2e299c5 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h @@ -274,4 +274,6 @@ void optc3_init_odm(struct timing_generator *optc); void optc31_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s); +void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state); + #endif /* __DC_OPTC_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c index 4a2caca37255..43ff957288b2 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c @@ -256,6 +256,7 @@ static const struct timing_generator_funcs dcn314_tg_funcs 
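Note on the pattern above: opp2_read_reg_state(), dcn35_opp_read_reg_state() and optc31_read_reg_state() all do the same thing — latch a hardware block's registers into a plain snapshot struct with one REG_READ() per field, then expose the routine through a function pointer in the per-ASIC funcs table so debug code can dump the state later. The standalone sketch below illustrates that shape with hypothetical names (my_block, reg_read, my_block_funcs); it is not the DC implementation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register indices and snapshot struct, not the DC types. */
enum { REG_CONTROL, REG_STATUS, REG_CRC, REG_COUNT };

struct my_block_reg_state {
	uint32_t control;
	uint32_t status;
	uint32_t crc;
};

struct my_block {
	volatile uint32_t *mmio;	/* base of the block's register window */
};

/* Stand-in for REG_READ(): one read per field, no side effects. */
static uint32_t reg_read(struct my_block *blk, unsigned int idx)
{
	return blk->mmio[idx];
}

/* Shape of opp2_read_reg_state()/optc31_read_reg_state(): latch now, dump later. */
static void my_block_read_reg_state(struct my_block *blk,
				    struct my_block_reg_state *s)
{
	s->control = reg_read(blk, REG_CONTROL);
	s->status  = reg_read(blk, REG_STATUS);
	s->crc     = reg_read(blk, REG_CRC);
}

/* Shape of the opp_funcs/timing_generator_funcs hook-up. */
struct my_block_funcs {
	void (*read_reg_state)(struct my_block *blk,
			       struct my_block_reg_state *s);
};

static const struct my_block_funcs my_funcs = {
	.read_reg_state = my_block_read_reg_state,
};

int main(void)
{
	uint32_t fake_regs[REG_COUNT] = { 0x1, 0x80000000u, 0xdeadbeef };
	struct my_block blk = { .mmio = fake_regs };
	struct my_block_reg_state snap;

	my_funcs.read_reg_state(&blk, &snap);
	printf("control=%#" PRIx32 " status=%#" PRIx32 " crc=%#" PRIx32 "\n",
	       snap.control, snap.status, snap.crc);
	return 0;
}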
= { .set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn314_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index b2b226bcd871..3dcb0d0c931c 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -365,6 +365,7 @@ static const struct timing_generator_funcs dcn32_tg_funcs = { .get_otg_double_buffer_pending = optc3_get_otg_update_pending, .get_pipe_update_pending = optc3_get_pipe_update_pending, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn32_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index 52d5ea98c86b..f699e95059f3 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -511,6 +511,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = { .set_long_vtotal = optc35_set_long_vtotal, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn35_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index 5af13706e601..a8e978d1fae8 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -533,6 +533,7 @@ static const struct timing_generator_funcs dcn401_tg_funcs = { .set_vupdate_keepout = optc401_set_vupdate_keepout, .wait_update_lock_status = optc401_wait_update_lock_status, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn401_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index c4b4dc3ad8c9..d40d91ec2035 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -78,6 +78,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -225,6 +226,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), link_regs(6), + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -368,6 +370,7 @@ static const struct dce_abm_mask abm_mask = { #define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -402,8 +406,10 @@ static const struct dc_plane_cap plane_cap = { } }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update 
= true, +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; #define CTX ctx @@ -484,6 +490,11 @@ static struct stream_encoder *dce100_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; @@ -624,7 +635,20 @@ static struct link_encoder *dce100_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -952,6 +976,10 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( int i; int j = -1; struct dc_link *link = stream->link; + enum engine_id preferred_engine = link->link_enc->preferred_engine; + + if (dc_is_rgb_signal(stream->signal)) + preferred_engine = link->link_enc->analog_engine; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && @@ -960,8 +988,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( * in daisy chain use case */ j = i; - if (pool->stream_enc[i]->id == - link->link_enc->preferred_engine) + if (pool->stream_enc[i]->id == preferred_engine) return pool->stream_enc[i]; } } @@ -1093,6 +1120,7 @@ static bool dce100_resource_construct( dc->caps.disable_dp_clk_share = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index cccde5a6f3cd..cd54382c0af3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -82,6 +82,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -377,6 +378,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -424,7 +426,9 @@ static const struct dc_plane_cap plane_cap = { 64 }; -static const struct dc_debug_options debug_defaults = { +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { .enable_legacy_fast_update = true, }; @@ -1376,6 +1380,7 @@ static bool dce110_resource_construct( dc->caps.is_apu = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c 
b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 869a8e515fc0..3f0a6bc4dcc2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -76,6 +76,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -385,6 +386,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -429,8 +431,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; #define CTX ctx @@ -1247,6 +1251,7 @@ static bool dce112_resource_construct( dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 540e04ec1e2d..b1570b6b1af3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -491,6 +491,7 @@ static struct dce_i2c_hw *dce120_i2c_hw_create( return dce_i2c_hw; } static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0 + NBIO_BASE(mmBIOS_SCRATCH_0_BASE_IDX), .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; @@ -526,8 +527,11 @@ static const struct dc_plane_cap plane_cap = { }; static const struct dc_debug_options debug_defaults = { - .disable_clock_gate = true, - .enable_legacy_fast_update = true, + .disable_clock_gate = true, +}; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; static struct clock_source *dce120_clock_source_create( @@ -1089,6 +1093,7 @@ static bool dce120_resource_construct( dc->caps.psp_setup_panel_mode = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index b75be6ad64f6..f0152933bee2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -80,6 +80,7 @@ #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -240,7 +241,9 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(2), link_regs(3), link_regs(4), - link_regs(5) + link_regs(5), + {0}, + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -366,6 +369,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = 
mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -373,6 +377,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -382,6 +387,7 @@ static const struct resource_caps res_cap_61 = { .num_timing_generator = 4, .num_audio = 6, .num_stream_encoder = 6, + .num_analog_stream_encoder = 1, .num_pll = 3, .num_ddc = 6, }; @@ -389,6 +395,7 @@ static const struct resource_caps res_cap_61 = { static const struct resource_caps res_cap_64 = { .num_timing_generator = 2, .num_audio = 2, + .num_analog_stream_encoder = 1, .num_stream_encoder = 2, .num_pll = 3, .num_ddc = 2, @@ -599,6 +606,11 @@ static struct stream_encoder *dce60_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -718,7 +730,20 @@ static struct link_encoder *dce60_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 5b7769745202..8687104cabb7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -78,6 +78,7 @@ #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -241,6 +242,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), link_regs(6), + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -368,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -383,6 +387,7 @@ static const struct resource_caps res_cap = { static const struct resource_caps res_cap_81 = { .num_timing_generator = 4, .num_audio = 7, + .num_analog_stream_encoder = 1, .num_stream_encoder = 7, .num_pll = 3, .num_ddc = 6, @@ -391,6 +396,7 @@ static const struct resource_caps res_cap_81 = { static const struct resource_caps res_cap_83 = { .num_timing_generator = 2, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 2, .num_ddc = 2, @@ -418,8 +424,10 @@ static const struct dc_plane_cap plane_cap = { } }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, +static const 
struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; static const struct dce_dmcu_registers dmcu_regs = { @@ -605,6 +613,11 @@ static struct stream_encoder *dce80_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -724,7 +737,20 @@ static struct link_encoder *dce80_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -919,6 +945,7 @@ static bool dce80_construct( dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * @@ -1320,6 +1347,7 @@ static bool dce83_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index 652c05c35494..f12367adf145 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -556,10 +556,13 @@ static const struct dc_debug_options debug_defaults_drv = { .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); @@ -1395,6 +1398,8 @@ static bool dcn10_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 0; + dc->debug = debug_defaults_drv; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 84b38d2d6967..6679c1a14f2f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -718,10 +718,13 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + void dcn20_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -733,7 +736,7 @@ struct dpp 
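Note on the recurring hunks above and below: every resource file makes the same split — enable_legacy_fast_update leaves dc_debug_options (which several DCE variants now default to { 0 }) and moves into a new dc_check_config, which resource_construct() assigns unconditionally via dc->check_config = config_defaults, so the fast-update policy no longer depends on whether the debug defaults are applied. A minimal sketch of that separation, with hypothetical struct and field names rather than the real dc ones:

#include <stdbool.h>
#include <stdio.h>

/* Debug-only overrides: may be skipped outside production driver runs. */
struct debug_options {
	bool disable_clock_gate;
};

/* Validation/check policy: always applied, regardless of environment. */
struct check_config {
	bool enable_legacy_fast_update;
};

struct device_core {
	struct debug_options debug;
	struct check_config check_config;
};

static const struct debug_options debug_defaults = { 0 };

static const struct check_config config_defaults = {
	.enable_legacy_fast_update = true,
};

static void resource_construct(struct device_core *dc, bool production_drv)
{
	/* Policy is applied unconditionally... */
	dc->check_config = config_defaults;

	/* ...while debug overrides stay gated on the environment. */
	if (production_drv)
		dc->debug = debug_defaults;
}

int main(void)
{
	struct device_core dc = { 0 };

	resource_construct(&dc, false);
	printf("legacy fast update: %d\n", dc.check_config.enable_legacy_fast_update);
	return 0;
}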
*dcn20_dpp_create( uint32_t inst) { struct dcn20_dpp *dpp = - kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL); if (!dpp) return NULL; @@ -751,7 +754,7 @@ struct input_pixel_processor *dcn20_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); @@ -768,7 +771,7 @@ struct output_pixel_processor *dcn20_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); @@ -785,7 +788,7 @@ struct dce_aux *dcn20_aux_engine_create( uint32_t inst) { struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; @@ -823,7 +826,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create( uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; @@ -835,8 +838,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create( } struct mpc *dcn20_mpc_create(struct dc_context *ctx) { - struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), - GFP_ATOMIC); + struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL); if (!mpc20) return NULL; @@ -853,8 +855,7 @@ struct mpc *dcn20_mpc_create(struct dc_context *ctx) struct hubbub *dcn20_hubbub_create(struct dc_context *ctx) { int i; - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_ATOMIC); + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub) return NULL; @@ -882,7 +883,7 @@ struct timing_generator *dcn20_timing_generator_create( uint32_t instance) { struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_ATOMIC); + kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; @@ -962,7 +963,7 @@ static struct clock_source *dcn20_clock_source_create( bool dp_clk_src) { struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; @@ -1061,7 +1062,7 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); @@ -1198,7 +1199,7 @@ struct hubp *dcn20_hubp_create( uint32_t inst) { struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; @@ -1668,6 +1669,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) return false; @@ -2286,7 +2288,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { - struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC); + struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); if (!pp_smu) return pp_smu; @@ -2472,6 +2474,7 @@ static bool dcn20_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -2765,7 +2768,7 @@ struct resource_pool *dcn20_create_resource_pool( struct dc *dc) { struct dcn20_resource_pool *pool = - kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL); if (!pool) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index e4a1338d21e0..055107843a70 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -614,10 +614,13 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn201_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN201_DPP(*dpp)); @@ -629,7 +632,7 @@ static struct dpp *dcn201_dpp_create( uint32_t inst) { struct dcn201_dpp *dpp = - kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_dpp), GFP_KERNEL); if (!dpp) return NULL; @@ -646,7 +649,7 @@ static struct input_pixel_processor *dcn201_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { return NULL; @@ -662,7 +665,7 @@ static struct output_pixel_processor *dcn201_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn201_opp *opp = - kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_opp), GFP_KERNEL); if (!opp) { return NULL; @@ -677,7 +680,7 @@ static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; @@ -710,7 +713,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; @@ -723,8 +726,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) { - struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), - GFP_ATOMIC); + struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), GFP_KERNEL); if (!mpc201) 
return NULL; @@ -740,8 +742,7 @@ static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx) { - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_ATOMIC); + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub) return NULL; @@ -759,7 +760,7 @@ static struct timing_generator *dcn201_timing_generator_create( uint32_t instance) { struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_ATOMIC); + kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; @@ -793,7 +794,7 @@ static struct link_encoder *dcn201_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); struct dcn10_link_encoder *enc10; if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) @@ -821,7 +822,7 @@ static struct clock_source *dcn201_clock_source_create( bool dp_clk_src) { struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; @@ -856,7 +857,7 @@ static struct stream_encoder *dcn201_stream_encoder_create( struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = - kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); if (!enc1) return NULL; @@ -883,7 +884,7 @@ static const struct dce_hwseq_mask hwseq_mask = { static struct dce_hwseq *dcn201_hwseq_create( struct dc_context *ctx) { - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC); + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; @@ -983,7 +984,7 @@ static struct hubp *dcn201_hubp_create( uint32_t inst) { struct dcn201_hubp *hubp201 = - kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_hubp), GFP_KERNEL); if (!hubp201) return NULL; @@ -1153,6 +1154,7 @@ static bool dcn201_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->debug = debug_defaults_drv; + dc->check_config = config_defaults; /*a0 only, remove later*/ dc->work_arounds.no_connect_phy_config = true; @@ -1303,7 +1305,7 @@ struct resource_pool *dcn201_create_resource_pool( struct dc *dc) { struct dcn201_resource_pool *pool = - kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_resource_pool), GFP_KERNEL); if (!pool) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 918742a42ded..2060acd5ae09 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -626,10 +626,13 @@ static const struct dc_debug_options debug_defaults_drv = { .usbc_combo_phy_reset_wa = true, .dmub_command_table = true, .use_max_lb = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1458,6 +1461,7 @@ static bool dcn21_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == 
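Note on the allocation changes above: the dcn20/dcn201 constructors switch from GFP_ATOMIC to GFP_KERNEL. Resource pools are built in ordinary process context where sleeping is allowed, so GFP_KERNEL is the appropriate flag; GFP_ATOMIC is meant for contexts that cannot sleep (IRQ handlers, spinlocked sections) and needlessly draws on emergency reserves here. A driver-style fragment showing the idiom (hypothetical my_dpp type, not the DC code, and intended for kernel context rather than standalone use):

#include <linux/slab.h>

/* Hypothetical object, mirroring the shape of dcn20_dpp_create(). */
struct my_dpp {
	int inst;
};

/*
 * Resource construction runs in process context, so the allocation may
 * sleep: use GFP_KERNEL.  GFP_ATOMIC would skip reclaim and dip into
 * reserves, which is only justified in atomic context.
 */
static struct my_dpp *my_dpp_create(int inst)
{
	struct my_dpp *dpp = kzalloc(sizeof(*dpp), GFP_KERNEL);

	if (!dpp)
		return NULL;

	dpp->inst = inst;
	return dpp;
}

static void my_dpp_destroy(struct my_dpp **dpp)
{
	kfree(*dpp);
	*dpp = NULL;
}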
DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index ff63f59ff928..d0ebb733e802 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -727,10 +727,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -2374,6 +2377,7 @@ static bool dcn30_resource_construct( dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 82a205a7c25c..3ad6a3d4858e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -701,10 +701,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = false, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn301_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -1498,6 +1501,7 @@ static bool dcn301_resource_construct( bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 61623cb518d9..c0d4a1dc94f8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1290,6 +1293,7 @@ static bool dcn302_resource_construct( &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 02b9a84f2db3..75e09c2c283e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1234,6 +1237,7 @@ static bool dcn303_resource_construct( bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 3ed7f50554e2..0d667b54ccf8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -888,12 +888,15 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .disable_z10 = true, - .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1978,6 +1981,7 @@ static bool dcn31_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index d4917a35b991..3ccde75a4ecb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -924,12 +924,15 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = true, - .enable_legacy_fast_update = true, .using_dml2 = false, .disable_dsc_power_gate = true, .min_disp_clk_khz = 100000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1910,6 +1913,7 @@ static bool dcn314_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 82cc78c291d8..4e962f522f1b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -887,9 +887,13 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, - .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, .using_dml2 = false, + .min_disp_clk_khz = 100000, +}; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, 
}; static const struct dc_panel_config panel_config_defaults = { @@ -1939,6 +1943,7 @@ static bool dcn315_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 636110e48d01..5a95dd54cb42 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -882,10 +882,13 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1815,6 +1818,7 @@ static bool dcn316_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 3965a7f1b64b..b276fec3e479 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -92,7 +92,7 @@ #include "dc_state_priv.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define DC_LOGGER_INIT(logger) @@ -738,10 +738,13 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dp_plus_plus_wa = true, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, - .enable_legacy_fast_update = false, .disable_stutter_for_wm_program = true }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn32_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1844,7 +1847,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, dc_state_set_stream_cursor_subvp_limit(stream, context, true); status = DC_FAIL_HW_CURSOR_SUPPORT; } - }; + } } if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { @@ -2197,7 +2200,8 @@ static bool dcn32_resource_construct( dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/ dc->caps.max_cursor_size = 64; - dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4) + /* floor(sqrt(buf_size_bytes / bpp ) * bpp, fixed_req_size) / bpp = max_width */ + dc->caps.max_buffered_cursor_size = 64; // floor(sqrt(16 * 1024 / 4) * 4, 256) / 4 = 64 dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; @@ -2294,6 +2298,7 @@ static bool dcn32_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index ad214986f7ac..3466ca34c93f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -731,11 +731,14 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_subvp_high_refresh = false, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, - .enable_legacy_fast_update = false, .disable_dc_mode_overwrite = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn321_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1797,6 +1800,7 @@ static bool dcn321_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index fff57f23f4f7..ef69898d2cc5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -33,7 +33,7 @@ #include "resource.h" #include "include/irq_service_interface.h" #include "dcn35_resource.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" @@ -767,7 +767,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -788,6 +787,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1946,6 +1949,7 @@ static bool dcn35_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 0abd163b425e..f3c614c4490c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -83,7 +83,7 @@ #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "link_enc_cfg.h" #define DC_LOGGER_INIT(logger) @@ -747,7 +747,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -768,6 +767,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1917,6 +1920,7 @@ static bool dcn351_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index ca125ee6c2fb..6469d5fe2e6d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -11,7 +11,7 @@ #include "resource.h" #include "include/irq_service_interface.h" #include "dcn36_resource.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" @@ -748,7 +748,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -769,6 +768,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1918,6 +1921,7 @@ static bool dcn36_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 1d18807e4749..875ae97489d3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -73,7 +73,7 @@ #include "dc_state_priv.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define DC_LOGGER_INIT(logger) @@ -721,7 +721,6 @@ static const struct dc_debug_options debug_defaults_drv = { .alloc_extra_way_for_cursor = true, .min_prefetch_in_strobe_ns = 60000, // 60us .disable_unbounded_requesting = false, - .enable_legacy_fast_update = false, .dcc_meta_propagation_delay_us = 10, .fams_version = { .minor = 1, @@ -737,6 +736,10 @@ static const struct dc_debug_options debug_defaults_drv = { .force_cositing = CHROMA_COSITING_NONE + 1, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn401_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1668,7 +1671,7 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc, dc_state_set_stream_cursor_subvp_limit(stream, context, true); status = DC_FAIL_HW_CURSOR_SUPPORT; } - }; + } } if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { @@ -1995,6 +1998,7 @@ static bool dcn401_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index 0fc66487d800..e1fa2e80a15a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -227,7 +227,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); #define LE_DCN401_REG_LIST_RI(id) \ LE_DCN3_REG_LIST_RI(id), \ SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \ - SRI_ARR(DIG_BE_CLK_CNTL, 
DIG, id) + SRI_ARR(DIG_BE_CLK_CNTL, DIG, id),\ + SR_ARR(DIO_CLK_CNTL, id) /* DPP */ #define DPP_REG_LIST_DCN401_COMMON_RI(id) \ diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h index 21d842857601..88c11b6be004 100644 --- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h @@ -9,7 +9,7 @@ #include "dc.h" #include "clk_mgr.h" #include "soc_and_ip_translator.h" -#include "dml2/dml21/inc/dml_top_soc_parameter_types.h" +#include "dml2_0/dml21/inc/dml_top_soc_parameter_types.h" void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator); diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index b1fb0f8a253a..7a839984dbc0 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -1018,6 +1018,21 @@ static bool spl_get_optimal_number_of_taps( spl_scratch->scl_data.taps.h_taps_c = 6; spl_scratch->scl_data.taps.v_taps_c = 6; } + + /* Override mode: keep EASF enabled but use input taps if valid */ + if (spl_in->override_easf) { + spl_scratch->scl_data.taps.h_taps = (in_taps->h_taps != 0) ? in_taps->h_taps : spl_scratch->scl_data.taps.h_taps; + spl_scratch->scl_data.taps.v_taps = (in_taps->v_taps != 0) ? in_taps->v_taps : spl_scratch->scl_data.taps.v_taps; + spl_scratch->scl_data.taps.h_taps_c = (in_taps->h_taps_c != 0) ? in_taps->h_taps_c : spl_scratch->scl_data.taps.h_taps_c; + spl_scratch->scl_data.taps.v_taps_c = (in_taps->v_taps_c != 0) ? 
in_taps->v_taps_c : spl_scratch->scl_data.taps.v_taps_c; + + if ((spl_scratch->scl_data.taps.h_taps > 6) || (spl_scratch->scl_data.taps.v_taps > 6)) + skip_easf = true; + if ((spl_scratch->scl_data.taps.h_taps > 1) && (spl_scratch->scl_data.taps.h_taps % 2)) + spl_scratch->scl_data.taps.h_taps--; + if ((spl_scratch->scl_data.taps.h_taps_c > 1) && (spl_scratch->scl_data.taps.h_taps_c % 2)) + spl_scratch->scl_data.taps.h_taps_c--; + } } /*Ensure we can support the requested number of vtaps*/ diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h index 23d254dea18f..20e4e52a77ac 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h @@ -545,6 +545,7 @@ struct spl_in { enum linear_light_scaling lls_pref; // Linear Light Scaling bool prefer_easf; bool disable_easf; + bool override_easf; /* If true, keep EASF enabled but use provided in_taps */ struct spl_debug debug; bool is_fullscreen; bool is_hdr_on; diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index 6ffc74fc9dcd..ad088d70e189 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -44,11 +44,6 @@ static void virtual_stream_encoder_dvi_set_stream_attribute( struct dc_crtc_timing *crtc_timing, bool is_dual_link) {} -static void virtual_stream_encoder_lvds_set_stream_attribute( - struct stream_encoder *enc, - struct dc_crtc_timing *crtc_timing) -{} - static void virtual_stream_encoder_set_throttled_vcp_size( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp) @@ -120,8 +115,6 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = { virtual_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = virtual_stream_encoder_dvi_set_stream_attribute, - .lvds_set_stream_attribute = - virtual_stream_encoder_lvds_set_stream_attribute, .set_throttled_vcp_size = virtual_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 338fdc651f2c..9d0168986fe7 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -132,6 +132,7 @@ enum dmub_window_id { DMUB_WINDOW_IB_MEM, DMUB_WINDOW_SHARED_STATE, DMUB_WINDOW_LSDMA_BUFFER, + DMUB_WINDOW_CURSOR_OFFLOAD, DMUB_WINDOW_TOTAL, }; @@ -317,6 +318,7 @@ struct dmub_srv_hw_params { bool enable_non_transparent_setconfig; bool lower_hbr3_phy_ssc; bool override_hbr3_pll_vco; + bool disable_dpia_bw_allocation; }; /** @@ -361,6 +363,19 @@ struct dmub_diagnostic_data { uint8_t is_pwait : 1; }; +/** + * struct dmub_preos_info - preos fw info before loading post os fw. 
+ */ +struct dmub_preos_info { + uint64_t fb_base; + uint64_t fb_offset; + uint64_t trace_buffer_phy_addr; + uint32_t trace_buffer_size; + uint32_t fw_version; + uint32_t boot_status; + uint32_t boot_options; +}; + struct dmub_srv_inbox { /* generic status */ uint64_t num_submitted; @@ -486,6 +501,7 @@ struct dmub_srv_hw_funcs { uint32_t (*get_current_time)(struct dmub_srv *dmub); void (*get_diagnostic_data)(struct dmub_srv *dmub); + bool (*get_preos_fw_info)(struct dmub_srv *dmub); bool (*should_detect)(struct dmub_srv *dmub); void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx); @@ -535,7 +551,8 @@ struct dmub_srv_create_params { * @fw_version: the current firmware version, if any * @is_virtual: false if hardware support only * @shared_state: dmub shared state between firmware and driver - * @fw_state: dmub firmware state pointer + * @cursor_offload_v1: Cursor offload state + * @fw_state: dmub firmware state pointer (debug purpose only) */ struct dmub_srv { enum dmub_asic asic; @@ -544,7 +561,9 @@ struct dmub_srv { bool is_virtual; struct dmub_fb scratch_mem_fb; struct dmub_fb ib_mem_gart; + struct dmub_fb cursor_offload_fb; volatile struct dmub_shared_state_feature_block *shared_state; + volatile struct dmub_cursor_offload_v1 *cursor_offload_v1; volatile const struct dmub_fw_state *fw_state; /* private: internal use only */ @@ -583,6 +602,7 @@ struct dmub_srv { enum dmub_srv_power_state_type power_state; struct dmub_diagnostic_data debug; struct dmub_fb lsdma_rb_fb; + struct dmub_preos_info preos_info; }; /** @@ -1068,4 +1088,14 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub, */ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub); +/** + * dmub_srv_get_preos_info() - retrieves preos fw info + * @dmub: the dmub service + * + * Return: + * true - preos fw info retrieved successfully + * false - preos fw info not retrieved successfully + */ +bool dmub_srv_get_preos_info(struct dmub_srv *dmub); + #endif /* _DMUB_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 92248224b713..3f2a0ed02c59 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -485,7 +485,19 @@ union replay_debug_flags { */ uint32_t enable_visual_confirm_debug : 1; - uint32_t reserved : 18; + /** + * 0x4000 (bit 14) + * @debug_log_enabled: Debug Log Enabled + */ + uint32_t debug_log_enabled : 1; + + /** + * 0x8000 (bit 15) + * @enable_sub_feature_visual_confirm: Enable Sub Feature Visual Confirm + */ + uint32_t enable_sub_feature_visual_confirm : 1; + + uint32_t reserved : 16; } bitfields; uint32_t u32All; @@ -593,6 +605,104 @@ union replay_hw_flags { uint32_t u32All; }; +/** + * Flags that can be set by driver to change some Panel Replay behaviour. + */ +union pr_debug_flags { + struct { + /** + * 0x1 (bit 0) + * Enable visual confirm in FW. + */ + uint32_t visual_confirm : 1; + + /** + * 0x2 (bit 1) + * @skip_crc: Set if need to skip CRC. 
+ */ + uint32_t skip_crc : 1; + + /** + * 0x4 (bit 2) + * @force_link_power_on: Force disable ALPM control + */ + uint32_t force_link_power_on : 1; + + /** + * 0x8 (bit 3) + * @force_phy_power_on: Force phy power on + */ + uint32_t force_phy_power_on : 1; + + /** + * 0x10 (bit 4) + * @skip_crtc_disabled: CRTC disable skipped + */ + uint32_t skip_crtc_disabled : 1; + + /* + * 0x20 (bit 5) + * @visual_confirm_rate_control: Enable Visual Confirm rate control detection + */ + uint32_t visual_confirm_rate_control : 1; + + uint32_t reserved : 26; + } bitfields; + + uint32_t u32All; +}; + +union pr_hw_flags { + struct { + /** + * @allow_alpm_fw_standby_mode: To indicate whether the + * ALPM FW standby mode is allowed + */ + uint32_t allow_alpm_fw_standby_mode : 1; + + /* + * @dsc_enable_status: DSC enable status in driver + */ + uint32_t dsc_enable_status : 1; + + /** + * @fec_enable_status: receive fec enable/disable status from driver + */ + uint32_t fec_enable_status : 1; + + /* + * @smu_optimizations_en: SMU power optimization. + * Only when active display is Replay capable and display enters Replay. + * Trigger interrupt to SMU to powerup/down. + */ + uint32_t smu_optimizations_en : 1; + + /** + * @phy_power_state: Indicates current phy power state + */ + uint32_t phy_power_state : 1; + + /** + * @link_power_state: Indicates current link power state + */ + uint32_t link_power_state : 1; + /** + * Use TPS3 signal when restore main link. + */ + uint32_t force_wakeup_by_tps3 : 1; + /** + * @is_alpm_initialized: Indicates whether ALPM is initialized + */ + uint32_t is_alpm_initialized : 1; + /** + * @alpm_mode: Indicates ALPM mode selected + */ + uint32_t alpm_mode : 2; + } bitfields; + + uint32_t u32All; +}; + union fw_assisted_mclk_switch_version { struct { uint8_t minor : 5; @@ -617,6 +727,7 @@ struct dmub_feature_caps { uint8_t replay_supported; uint8_t replay_reserved[3]; uint8_t abm_aux_backlight_support; + uint8_t lsdma_support_in_dmu; }; struct dmub_visual_confirm_color { @@ -629,6 +740,112 @@ struct dmub_visual_confirm_color { uint16_t panel_inst; }; +/** + * struct dmub_cursor_offload_pipe_data_dcn30_v1 - DCN30+ per pipe data. 
+ */ +struct dmub_cursor_offload_pipe_data_dcn30_v1 { + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16; + uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5; + uint32_t reserved0[4]; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1; + uint32_t CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24; + uint32_t CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24; + uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS : 16; + uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE, : 16; + uint32_t reserved1[5]; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8; + uint32_t reserved2[3]; +}; + +/** + * struct dmub_cursor_offload_pipe_data_dcn401_v1 - DCN401 per pipe data. + */ +struct dmub_cursor_offload_pipe_data_dcn401_v1 { + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16; + uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5; + uint32_t reserved0[4]; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1; + uint32_t CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24; + uint32_t CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y, : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB : 16; + uint32_t reserved1[4]; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8; + uint32_t HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR : 1; + uint32_t reserved2[3]; +}; + +/** + * struct dmub_cursor_offload_pipe_data_v1 - Per pipe data for cursor offload. 
+ */ +struct dmub_cursor_offload_pipe_data_v1 { + union { + struct dmub_cursor_offload_pipe_data_dcn30_v1 dcn30; /**< DCN30 cursor data. */ + struct dmub_cursor_offload_pipe_data_dcn401_v1 dcn401; /**< DCN401 cursor data. */ + uint8_t payload[96]; /**< Guarantees the cursor pipe data size per-pipe. */ + }; +}; + +/** + * struct dmub_cursor_offload_payload_data_v1 - A payload of stream data. + */ +struct dmub_cursor_offload_payload_data_v1 { + uint32_t write_idx_start; /**< Write index, updated before pipe_data is written. */ + uint32_t write_idx_finish; /**< Write index, updated after pipe_data is written. */ + uint32_t pipe_mask; /**< Mask of pipes to update. */ + uint32_t reserved; /**< Reserved for future use. */ + struct dmub_cursor_offload_pipe_data_v1 pipe_data[6]; /**< Per-pipe cursor data. */ +}; + +/** + * struct dmub_cursor_offload_stream_v1 - Per-stream data for cursor offload. + */ +struct dmub_cursor_offload_stream_v1 { + struct dmub_cursor_offload_payload_data_v1 payloads[4]; /**< A small buffer of cursor payloads. */ + uint32_t write_idx; /**< The index of the last written payload. */ +}; + +/** + * struct dmub_cursor_offload_v1 - Cursor offload feature state. + */ +struct dmub_cursor_offload_v1 { + struct dmub_cursor_offload_stream_v1 offload_streams[6]; /**< Per-stream cursor offload data */ +}; + //============================================================================== //</DMUB_TYPES>================================================================= //============================================================================== @@ -648,7 +865,8 @@ struct dmub_visual_confirm_color { union dmub_fw_meta_feature_bits { struct { uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */ - uint32_t reserved : 31; + uint32_t cursor_offload_v1_support: 1; /**< 1 supports cursor offload */ + uint32_t reserved : 30; } bits; /**< status bits */ uint32_t all; /**< 32-bit access to status bits */ }; @@ -814,6 +1032,28 @@ enum dmub_ips_comand_type { }; /** + * enum dmub_cursor_offload_comand_type - Cursor offload subcommands. + */ +enum dmub_cursor_offload_comand_type { + /** + * Initializes the cursor offload feature. + */ + DMUB_CMD__CURSOR_OFFLOAD_INIT = 0, + /** + * Enables cursor offloading for a stream and updates the timing parameters. + */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE = 1, + /** + * Disables cursor offloading for a given stream. + */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE = 2, + /** + * Programs the latest data for a given stream. + */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM = 3, +}; + +/** * union dmub_fw_boot_options - Boot option definitions for SCRATCH14 */ union dmub_fw_boot_options { @@ -844,7 +1084,8 @@ union dmub_fw_boot_options { uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */ uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */ uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */ - uint32_t reserved : 5; /**< reserved */ + uint32_t disable_dpia_bw_allocation: 1; /**< 1 to disable the USB4 DPIA BW allocation */ + uint32_t reserved : 4; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -877,6 +1118,7 @@ enum dmub_shared_state_feature_id { DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1, DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2, DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3, + DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1 = 4, DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. 
*/ }; @@ -958,6 +1200,22 @@ struct dmub_shared_state_ips_driver { }; /* 248-bytes, fixed */ /** + * struct dmub_shared_state_cursor_offload_v1 - Header metadata for cursor offload. + */ +struct dmub_shared_state_cursor_offload_stream_v1 { + uint32_t last_write_idx; /**< Last write index */ + uint8_t reserved[28]; /**< Reserved bytes. */ +}; /* 32-bytes, fixed */ + +/** + * struct dmub_shared_state_cursor_offload_v1 - Header metadata for cursor offload. + */ +struct dmub_shared_state_cursor_offload_v1 { + struct dmub_shared_state_cursor_offload_stream_v1 offload_streams[6]; /**< stream state, 32-bytes each */ + uint8_t reserved[56]; /**< reserved for future use */ +}; /* 248-bytes, fixed */ + +/** * enum dmub_shared_state_feature_common - Generic payload. */ struct dmub_shared_state_feature_common { @@ -983,6 +1241,7 @@ struct dmub_shared_state_feature_block { struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */ struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */ + struct dmub_shared_state_cursor_offload_v1 cursor_offload_v1; /**< Cursor offload */ } data; /**< Shared state data. */ }; /* 256-bytes, fixed */ @@ -1572,6 +1831,25 @@ enum dmub_cmd_type { */ DMUB_CMD__IPS = 91, + /** + * Command type use for Cursor offload. + */ + DMUB_CMD__CURSOR_OFFLOAD = 92, + + /** + * Command type used for all SMART_POWER_OLED commands. + */ + DMUB_CMD__SMART_POWER_OLED = 93, + + /** + * Command type use for all Panel Replay commands. + */ + DMUB_CMD__PR = 94, + + + /** + * Command type use for VBIOS shared commands. + */ DMUB_CMD__VBIOS = 128, }; @@ -2369,6 +2647,7 @@ struct dmub_cmd_fams2_global_config { union dmub_cmd_fams2_config { struct dmub_cmd_fams2_global_config global; +// coverity[cert_dcl37_c_violation:FALSE] errno.h, stddef.h, stdint.h not included in atombios.h struct dmub_fams2_stream_static_state stream; //v0 union { struct dmub_fams2_cmd_stream_static_base_state base; @@ -3981,6 +4260,33 @@ enum replay_state { }; /** + * Definition of a panel replay state + */ +enum pr_state { + PR_STATE_0 = 0x00, // State 0 steady state + // Pending SDP and Unlock before back to State 0 + PR_STATE_0_PENDING_SDP_AND_UNLOCK = 0x01, + PR_STATE_1 = 0x10, // State 1 + PR_STATE_2 = 0x20, // State 2 steady state + // Pending frame transmission before transition to State 2 + PR_STATE_2_PENDING_FRAME_TRANSMISSION = 0x30, + // Active and Powered Up + PR_STATE_2_POWERED = 0x31, + // Active and Powered Down, but need to blank HUBP after DPG_EN latch + PR_STATE_2_PENDING_HUBP_BLANK = 0x32, + // Active and Pending Power Up + PR_STATE_2_PENDING_POWER_UP = 0x33, + // Active and Powered Up, Pending DPG latch + PR_STATE_2_PENDING_LOCK_FOR_DPG_POWER_ON = 0x34, + // Active and Powered Up, Pending SDP and Unlock + PR_STATE_2_PENDING_SDP_AND_UNLOCK = 0x35, + // Pending transmission of AS SDP for timing sync, but no rfb update + PR_STATE_2_PENDING_AS_SDP = 0x36, + // Invalid + PR_STATE_INVALID = 0xFF, +}; + +/** * Replay command sub-types. 
*/ enum dmub_cmd_replay_type { @@ -4030,6 +4336,25 @@ enum dmub_cmd_replay_type { DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16, }; +/* + * Panel Replay sub-types + */ +enum dmub_cmd_panel_replay_type { + DMUB_CMD__PR_ENABLE = 0, + DMUB_CMD__PR_COPY_SETTINGS = 1, + DMUB_CMD__PR_UPDATE_STATE = 2, + DMUB_CMD__PR_GENERAL_CMD = 3, +}; + +enum dmub_cmd_panel_replay_state_update_subtype { + PR_STATE_UPDATE_COASTING_VTOTAL = 0x1, + PR_STATE_UPDATE_SYNC_MODE = 0x2, +}; + +enum dmub_cmd_panel_replay_general_subtype { + PR_GENERAL_CMD_DEBUG_OPTION = 0x1, +}; + /** * Replay general command sub-types. */ @@ -4045,6 +4370,7 @@ enum dmub_cmd_replay_general_subtype { REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION, REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS, REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE, + REPLAY_GENERAL_CMD_VIDEO_CONFERENCING, }; struct dmub_alpm_auxless_data { @@ -4182,17 +4508,13 @@ struct dmub_cmd_replay_set_version_data { */ uint8_t panel_inst; /** - * PSR version that FW should implement. + * Replay version that FW should implement. */ enum replay_version version; /** - * PSR control version. - */ - uint8_t cmd_version; - /** * Explicit padding to 4 byte boundary. */ - uint8_t pad[2]; + uint8_t pad[3]; }; /** @@ -4238,6 +4560,45 @@ enum replay_enable { }; /** + * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_ENABLE command. + */ +struct dmub_rb_cmd_smart_power_oled_enable_data { + /** + * SMART_POWER_OLED enable or disable. + */ + uint8_t enable; + /** + * Panel Instance. + * Panel isntance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + + uint16_t peak_nits; + /** + * OTG HW instance. + */ + uint8_t otg_inst; + /** + * DIG FE HW instance. + */ + uint8_t digfe_inst; + /** + * DIG BE HW instance. + */ + uint8_t digbe_inst; + uint8_t debugcontrol; + /* + * vertical interrupt trigger line + */ + uint32_t triggerline; + + uint16_t fixed_max_cll; + + uint8_t pad[2]; +}; + +/** * Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command. */ struct dmub_rb_cmd_replay_enable_data { @@ -4408,9 +4769,9 @@ struct dmub_cmd_replay_set_coasting_vtotal_data { */ uint16_t coasting_vtotal_high; /** - * Explicit padding to 4 byte boundary. + * frame skip number. */ - uint8_t pad[2]; + uint16_t frame_skip_number; }; /** @@ -4571,6 +4932,58 @@ union dmub_replay_cmd_set { }; /** + * SMART POWER OLED command sub-types. + */ +enum dmub_cmd_smart_power_oled_type { + + /** + * Enable/Disable SMART_POWER_OLED. + */ + DMUB_CMD__SMART_POWER_OLED_ENABLE = 1, + /** + * Get current MaxCLL value if SMART POWER OLED is enabled. + */ + DMUB_CMD__SMART_POWER_OLED_GETMAXCLL = 2, +}; + +/** + * Definition of a DMUB_CMD__SMART_POWER_OLED command. + */ +struct dmub_rb_cmd_smart_power_oled_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + struct dmub_rb_cmd_smart_power_oled_enable_data data; +}; + +struct dmub_cmd_smart_power_oled_getmaxcll_input { + uint8_t panel_inst; + uint8_t pad[3]; +}; + +struct dmub_cmd_smart_power_oled_getmaxcll_output { + uint16_t current_max_cll; + uint8_t pad[2]; +}; + +/** + * Definition of a DMUB_CMD__SMART_POWER_OLED command. + */ +struct dmub_rb_cmd_smart_power_oled_getmaxcll { + struct dmub_cmd_header header; /**< Command header */ + /** + * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command. 
+ */ + union dmub_cmd_smart_power_oled_getmaxcll_data { + struct dmub_cmd_smart_power_oled_getmaxcll_input input; /**< Input */ + struct dmub_cmd_smart_power_oled_getmaxcll_output output; /**< Output */ + uint32_t output_raw; /**< Raw data output */ + } data; +}; + +/** * Set of HW components that can be locked. * * Note: If updating with more HW components, fields @@ -4652,6 +5065,7 @@ enum hw_lock_client { */ HW_LOCK_CLIENT_REPLAY = 4, HW_LOCK_CLIENT_FAMS2 = 5, + HW_LOCK_CLIENT_CURSOR_OFFLOAD = 6, /** * Invalid client. */ @@ -6064,6 +6478,257 @@ struct dmub_rb_cmd_ips_query_residency_info { }; /** + * struct dmub_cmd_cursor_offload_init_data - Payload for cursor offload init command. + */ +struct dmub_cmd_cursor_offload_init_data { + union dmub_addr state_addr; /**< State address for dmub_cursor_offload */ + uint32_t state_size; /**< State size for dmub_cursor_offload */ +}; + +/** + * struct dmub_rb_cmd_cursor_offload_init - Data for initializing cursor offload. + */ +struct dmub_rb_cmd_cursor_offload_init { + struct dmub_cmd_header header; + struct dmub_cmd_cursor_offload_init_data init_data; +}; + +/** + * struct dmub_cmd_cursor_offload_stream_data - Payload for cursor offload stream command. + */ +struct dmub_cmd_cursor_offload_stream_data { + uint32_t otg_inst: 4; /**< OTG instance to control */ + uint32_t reserved: 28; /**< Reserved for future use */ + uint32_t line_time_in_ns; /**< Line time in ns for the OTG */ + uint32_t v_total_max; /**< OTG v_total_max */ +}; + +/** + * struct dmub_rb_cmd_cursor_offload_stream_cntl - Controls a stream for cursor offload. + */ +struct dmub_rb_cmd_cursor_offload_stream_cntl { + struct dmub_cmd_header header; + struct dmub_cmd_cursor_offload_stream_data data; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__PR_ENABLE command. + */ +struct dmub_cmd_pr_enable_data { + /** + * Panel Replay enable or disable. + */ + uint8_t enable; + /** + * Panel Instance. + * Panel isntance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Phy state to enter. + * Values to use are defined in dmub_phy_fsm_state + */ + uint8_t phy_fsm_state; + /** + * Phy rate for DP - RBR/HBR/HBR2/HBR3. + * Set this using enum phy_link_rate. + * This does not support HDMI/DP2 for now. + */ + uint8_t phy_rate; + /** + * @hpo_stream_enc_inst: HPO stream encoder instance + */ + uint8_t hpo_stream_enc_inst; + /** + * @hpo_link_enc_inst: HPO link encoder instance + */ + uint8_t hpo_link_enc_inst; + /** + * @pad: Align structure to 4 byte boundary. + */ + uint8_t pad[2]; +}; + +/** + * Definition of a DMUB_CMD__PR_ENABLE command. + * Panel Replay enable/disable is controlled using action in data. + */ +struct dmub_rb_cmd_pr_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + struct dmub_cmd_pr_enable_data data; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command. + */ +struct dmub_cmd_pr_copy_settings_data { + /** + * Flags that can be set by driver to change some replay behaviour. + */ + union pr_debug_flags debug; + + /** + * @flags: Flags used to determine feature functionality. + */ + union pr_hw_flags flags; + + /** + * DPP HW instance. + */ + uint8_t dpp_inst; + /** + * OTG HW instance. + */ + uint8_t otg_inst; + /** + * DIG FE HW instance. + */ + uint8_t digfe_inst; + /** + * DIG BE HW instance. + */ + uint8_t digbe_inst; + /** + * AUX HW instance. + */ + uint8_t aux_inst; + /** + * Panel Instance. 
+ * Panel isntance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Length of each horizontal line in ns. + */ + uint32_t line_time_in_ns; + /** + * PHY instance. + */ + uint8_t dpphy_inst; + /** + * Determines if SMU optimzations are enabled/disabled. + */ + uint8_t smu_optimizations_en; + /* + * Use FSM state for Replay power up/down + */ + uint8_t use_phy_fsm; + /* + * Use FSFT afftet pixel clk + */ + uint32_t pix_clk_100hz; + /* + * Use Original pixel clock + */ + uint32_t sink_pix_clk_100hz; + /** + * Use for AUX-less ALPM LFPS wake operation + */ + struct dmub_alpm_auxless_data auxless_alpm_data; + /** + * @hpo_stream_enc_inst: HPO stream encoder instance + */ + uint8_t hpo_stream_enc_inst; + /** + * @hpo_link_enc_inst: HPO link encoder instance + */ + uint8_t hpo_link_enc_inst; + /** + * @pad: Align structure to 4 byte boundary. + */ + uint8_t pad[2]; +}; + +/** + * Definition of a DMUB_CMD__PR_COPY_SETTINGS command. + */ +struct dmub_rb_cmd_pr_copy_settings { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command. + */ + struct dmub_cmd_pr_copy_settings_data data; +}; + +struct dmub_cmd_pr_update_state_data { + /** + * Panel Instance. + * Panel isntance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + + uint8_t pad[3]; // align to 4-byte boundary + /* + * Update flags to control the update behavior. + */ + uint32_t update_flag; + /** + * state/data to set. + */ + uint32_t coasting_vtotal; + uint32_t sync_mode; +}; + +struct dmub_cmd_pr_general_cmd_data { + /** + * Panel Instance. + * Panel isntance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * subtype: PR general cmd sub type + */ + uint8_t subtype; + + uint8_t pad[2]; + /** + * config data by different subtypes + */ + union { + uint32_t u32All; + } data; +}; + +/** + * Definition of a DMUB_CMD__PR_UPDATE_STATE command. + */ +struct dmub_rb_cmd_pr_update_state { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PR_UPDATE_STATE command. + */ + struct dmub_cmd_pr_update_state_data data; +}; + +/** + * Definition of a DMUB_CMD__PR_GENERAL_CMD command. + */ +struct dmub_rb_cmd_pr_general_cmd { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PR_GENERAL_CMD command. + */ + struct dmub_cmd_pr_general_cmd_data data; +}; + +/** * union dmub_rb_cmd - DMUB inbox command. */ union dmub_rb_cmd { @@ -6392,6 +7057,38 @@ union dmub_rb_cmd { struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl; struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info; + /** + * Definition of a DMUB_CMD__CURSOR_OFFLOAD_INIT command. + */ + struct dmub_rb_cmd_cursor_offload_init cursor_offload_init; + /** + * Definition of a DMUB_CMD__CURSOR_OFFLOAD control commands. + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_UPDATE_DRR + */ + struct dmub_rb_cmd_cursor_offload_stream_cntl cursor_offload_stream_ctnl; + /** + * Definition of a DMUB_CMD__SMART_POWER_OLED_ENABLE command. 
+ */ + struct dmub_rb_cmd_smart_power_oled_enable smart_power_oled_enable; + /** + * Definition of a DMUB_CMD__DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command. + */ + struct dmub_rb_cmd_smart_power_oled_getmaxcll smart_power_oled_getmaxcll; + /* + * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command. + */ + struct dmub_rb_cmd_pr_copy_settings pr_copy_settings; + /** + * Definition of a DMUB_CMD__REPLAY_ENABLE command. + */ + struct dmub_rb_cmd_pr_enable pr_enable; + + struct dmub_rb_cmd_pr_update_state pr_update_state; + + struct dmub_rb_cmd_pr_general_cmd pr_general_cmd; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 4777c7203b2c..cd04d7c756c3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -380,6 +380,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco; boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0; + boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index ce041f6239dc..7e9856289910 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -89,50 +89,58 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in, void dmub_dcn32_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; - const uint32_t timeout = 100000; - uint32_t in_reset, is_enabled, scratch, i, pwait_mode; + const uint32_t timeout_us = 1 * 1000 * 1000; //1s + const uint32_t poll_delay_us = 1; //1us + uint32_t i = 0; + uint32_t enabled, in_reset, scratch, pwait_mode; - REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); - REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); + REG_GET(DMCUB_CNTL, + DMCUB_ENABLE, &enabled); + REG_GET(DMCUB_CNTL2, + DMCUB_SOFT_RESET, &in_reset); - if (in_reset == 0 && is_enabled != 0) { + if (enabled && in_reset == 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - for (i = 0; i < timeout; ++i) { - if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) - break; - - udelay(1); - } - - for (i = 0; i < timeout; ++i) { + for (; i < timeout_us; i++) { scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; - udelay(1); + udelay(poll_delay_us); } - for (i = 0; i < timeout; ++i) { + for (; i < timeout_us; i++) { REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); if (pwait_mode & (1 << 0)) break; - udelay(1); + udelay(poll_delay_us); } - /* Force reset in case we timed out, DMCUB is likely hung. 
*/ } - if (is_enabled) { + if (enabled) { REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); udelay(1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); } + if (i >= timeout_us) { + /* timeout should never occur */ + BREAK_TO_DEBUGGER(); + } + + REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -141,7 +149,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); - /* Clear the GPINT command manually so we don't send anything during boot. */ + /* Clear the GPINT command manually so we don't reset again. */ cmd.all = 0; dmub->hw_funcs.set_gpint(dmub, cmd); } @@ -163,7 +171,9 @@ void dmub_dcn32_backdoor_load(struct dmub_srv *dmub, dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset); + /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); @@ -193,7 +203,9 @@ void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub, { union dmub_addr offset; + /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); offset = cw0->offset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 834e5434ccb8..e13557ed97be 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -418,6 +418,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.disable_sldo_opt = params->disable_sldo_opt; boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig; boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc; + boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -520,6 +521,45 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } + +bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub) +{ + uint64_t region3_cw5_offset; + uint32_t top_addr, top_addr_enable, offset_low; + uint32_t offset_high, base_addr, fw_version; + bool is_vbios_fw = false; + + memset(&dmub->preos_info, 0, sizeof(dmub->preos_info)); + + fw_version = REG_READ(DMCUB_SCRATCH1); + is_vbios_fw = ((fw_version >> 6) & 0x01) ? 
true : false; + if (!is_vbios_fw) + return false; + + dmub->preos_info.boot_status = REG_READ(DMCUB_SCRATCH0); + dmub->preos_info.fw_version = REG_READ(DMCUB_SCRATCH1); + dmub->preos_info.boot_options = REG_READ(DMCUB_SCRATCH14); + REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS, + DMCUB_REGION3_CW5_ENABLE, &top_addr_enable); + if (top_addr_enable) { + dmub_dcn35_get_fb_base_offset(dmub, + &dmub->preos_info.fb_base, &dmub->preos_info.fb_offset); + offset_low = REG_READ(DMCUB_REGION3_CW5_OFFSET); + offset_high = REG_READ(DMCUB_REGION3_CW5_OFFSET_HIGH); + region3_cw5_offset = ((uint64_t)offset_high << 32) | offset_low; + dmub->preos_info.trace_buffer_phy_addr = region3_cw5_offset + - dmub->preos_info.fb_base + dmub->preos_info.fb_offset; + + REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS, + DMCUB_REGION3_CW5_TOP_ADDRESS, &top_addr); + base_addr = REG_READ(DMCUB_REGION3_CW5_BASE_ADDRESS) & 0x1FFFFFFF; + dmub->preos_info.trace_buffer_size = + (top_addr > base_addr) ? (top_addr - base_addr + 1) : 0; + } + + return true; +} + void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub) { /* DMCUB_REGION3_TMR_AXI_SPACE values: diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h index 39fcb7275da5..92e6695a2c9b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h @@ -285,4 +285,6 @@ bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub); void dmub_srv_dcn35_regs_init(struct dmub_srv *dmub, struct dc_context *ctx); +bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub); + #endif /* _DMUB_DCN35_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index b31adbd0d685..95542299e3b3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -81,7 +81,7 @@ void dmub_dcn401_reset(struct dmub_srv *dmub) dmub->hw_funcs.set_gpint(dmub, cmd); for (; i < timeout_us; i++) { - scratch = dmub->hw_funcs.get_gpint_response(dmub); + scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; @@ -97,11 +97,24 @@ void dmub_dcn401_reset(struct dmub_srv *dmub) } } + if (enabled) { + REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); + udelay(1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); + } + if (i >= timeout_us) { /* timeout should never occur */ BREAK_TO_DEBUGGER(); } + REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -134,7 +147,6 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub, /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); @@ -168,7 +180,6 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub, /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); - 
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); offset = cw0->offset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index b17a19400c06..a6ae1d2e9685 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -66,7 +66,7 @@ #define DMUB_SCRATCH_MEM_SIZE (1024) /* Default indirect buffer size. */ -#define DMUB_IB_MEM_SIZE (1280) +#define DMUB_IB_MEM_SIZE (2560) /* Default LSDMA ring buffer size. */ #define DMUB_LSDMA_RB_SIZE (64 * 1024) @@ -359,6 +359,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_current_time = dmub_dcn35_get_current_time; funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data; + funcs->get_preos_fw_info = dmub_dcn35_get_preos_fw_info; funcs->init_reg_offsets = dmub_srv_dcn35_regs_init; if (asic == DMUB_ASIC_DCN351) @@ -564,10 +565,11 @@ enum dmub_status window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE; window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; - window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; + window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = dmub_align(DMUB_SCRATCH_MEM_SIZE, 64); window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE; window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE; + window_sizes[DMUB_WINDOW_CURSOR_OFFLOAD] = dmub_align(sizeof(struct dmub_cursor_offload_v1), 64); out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); @@ -652,21 +654,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; - struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; - struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM]; struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE]; struct dmub_rb_init_params rb_params, outbox0_rb_params; struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6; struct dmub_region inbox1, outbox1, outbox0; + uint32_t i; + if (!dmub->sw_init) return DMUB_STATUS_INVALID; - if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb || - !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) { - ASSERT(0); - return DMUB_STATUS_INVALID; + for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) { + if (!params->fb[i]) { + ASSERT(0); + return DMUB_STATUS_INVALID; + } } dmub->fb_base = params->fb_base; @@ -748,9 +751,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->shared_state = shared_state_fb->cpu_addr; - dmub->scratch_mem_fb = *scratch_mem_fb; + dmub->scratch_mem_fb = *params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; + dmub->ib_mem_gart = *params->fb[DMUB_WINDOW_IB_MEM]; - dmub->ib_mem_gart = *ib_mem_gart; + dmub->cursor_offload_fb = *params->fb[DMUB_WINDOW_CURSOR_OFFLOAD]; + dmub->cursor_offload_v1 = (struct dmub_cursor_offload_v1 *)dmub->cursor_offload_fb.cpu_addr; if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, ®ion6); @@ -1368,3 +1373,11 @@ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub) return DMUB_STATUS_OK; } + +bool dmub_srv_get_preos_info(struct dmub_srv *dmub) +{ 
+ if (!dmub || !dmub->hw_funcs.get_preos_fw_info) + return false; + + return dmub->hw_funcs.get_preos_fw_info(dmub); +} diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index 812377d9e48f..973b6bdbac63 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -135,12 +135,8 @@ struct bp_external_encoder_control { struct bp_crtc_source_select { enum engine_id engine_id; enum controller_id controller_id; - /* from GPU Tx aka asic_signal */ - enum signal_type signal; - /* sink_signal may differ from asicSignal if Translator encoder */ enum signal_type sink_signal; - enum display_output_bit_depth display_output_bit_depth; - bool enable_dp_audio; + uint8_t bit_depth; }; struct bp_transmitter_control { @@ -166,6 +162,11 @@ struct bp_transmitter_control { bool single_pll_mode; }; +struct bp_load_detection_parameters { + enum engine_id engine_id; + uint16_t device_id; +}; + struct bp_hw_crtc_timing_parameters { enum controller_id controller_id; /* horizontal part */ diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index de8f3cfed6c8..07b937b92efc 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -30,6 +30,22 @@ #ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h #define DP_SINK_HW_REVISION_START 0x409 #endif +/* Panel Replay*/ +#ifndef DP_PANEL_REPLAY_CAPABILITY_SUPPORT // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_CAPABILITY_SUPPORT 0x0b0 +#endif /* DP_PANEL_REPLAY_CAPABILITY_SUPPORT */ +#ifndef DP_PANEL_REPLAY_CAPABILITY // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_CAPABILITY 0x0b1 +#endif /* DP_PANEL_REPLAY_CAPABILITY */ +#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 0x1b0 +#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 */ +#ifndef DP_PANEL_REPLAY_ENABLE // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_ENABLE (1 << 0) +#endif /* DP_PANEL_REPLAY_ENABLE */ +#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 0x1b1 +#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 */ enum dpcd_revision { DPCD_REV_10 = 0x10, diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index cc467031651d..38a77fa9b4af 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -169,6 +169,7 @@ struct dc_firmware_info { uint32_t engine_clk_ss_percentage; } feature; + uint32_t max_pixel_clock; /* in KHz */ uint32_t default_display_engine_pll_frequency; /* in KHz */ uint32_t external_clock_source_frequency_for_dp; /* in KHz */ uint32_t smu_gpu_pll_output_freq; /* in KHz */ diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index 54e33062b3c0..1386fa124e85 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -310,4 
+310,11 @@ static inline bool dal_graphics_object_id_equal( } return false; } + +static inline bool dc_connector_supports_analog(const enum connector_id conn) +{ + return conn == CONNECTOR_ID_VGA || + conn == CONNECTOR_ID_SINGLE_LINK_DVII || + conn == CONNECTOR_ID_DUAL_LINK_DVII; +} #endif diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index a10d6b988aab..3a2c2d2fb629 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -118,6 +118,18 @@ static inline bool dc_is_dvi_signal(enum signal_type signal) } } +/** + * dc_is_rgb_signal() - Whether the signal is analog RGB. + * + * Returns whether the given signal type is an analog RGB signal + * that is used with a DAC on VGA or DVI-I connectors. + * Not to be confused with other uses of "RGB", such as RGB color space. + */ +static inline bool dc_is_rgb_signal(enum signal_type signal) +{ + return (signal == SIGNAL_TYPE_RGB); +} + static inline bool dc_is_tmds_signal(enum signal_type signal) { switch (signal) { diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ce421bcddcb0..1aae46d703ba 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -1260,6 +1260,17 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync, update_v_total_for_static_ramp( core_freesync, stream, in_out_vrr); } + + /* + * If VRR is inactive, set vtotal min and max to nominal vtotal + */ + if (in_out_vrr->state == VRR_STATE_INACTIVE) { + in_out_vrr->adjust.v_total_min = + mod_freesync_calc_v_total_from_refresh(stream, + in_out_vrr->max_refresh_in_uhz); + in_out_vrr->adjust.v_total_max = in_out_vrr->adjust.v_total_min; + return; + } } unsigned long long mod_freesync_calc_nominal_field_rate( diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index c760216a6240..ca402ddcdacc 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -354,7 +354,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, /* reset retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* add display to connection */ @@ -400,7 +400,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, /* clear retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* remove display */ @@ -464,7 +464,7 @@ enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp, /* clear retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* set new adjustment */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index a37634942b07..26a351a184f3 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -88,6 +88,7 @@ struct mod_hdcp_transition_input_hdcp2 { uint8_t lc_init_write; uint8_t l_prime_available_poll; uint8_t l_prime_read; + uint8_t l_prime_combo_read; uint8_t l_prime_validation; uint8_t eks_prepare; uint8_t eks_write; @@ -508,7 +509,7 
@@ static inline void set_auth_complete(struct mod_hdcp *hdcp, struct mod_hdcp_output *output) { output->auth_complete = 1; - mod_hdcp_log_ddc_trace(hdcp); + HDCP_AUTH_COMPLETE_TRACE(hdcp); } /* connection topology helpers */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 8bc377560787..1bbd728d4345 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -29,6 +29,7 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) { uint64_t n = 0; uint8_t count = 0; + enum mod_hdcp_status status; u8 bksv[sizeof(n)] = { }; memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv)); @@ -38,8 +39,14 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) count++; n &= (n - 1); } - return (count == 20) ? MOD_HDCP_STATUS_SUCCESS : - MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; + + if (count == 20) { + hdcp->connection.trace.hdcp1.attempt_count++; + status = MOD_HDCP_STATUS_SUCCESS; + } else { + status = MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; + } + return status; } static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp) @@ -135,6 +142,8 @@ static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; + hdcp->connection.trace.hdcp1.downstream_device_count = get_device_count(hdcp); + /* Some MST display may choose to report the internal panel as an HDCP RX. * To update this condition with 1(because the immediate repeater's internal * panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index bb8ae80b37f8..27500abf9fee 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -48,6 +48,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; + struct mod_hdcp_trace *trace = &hdcp->connection.trace; if (is_dp_hdcp(hdcp)) status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) && @@ -55,9 +56,14 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; else - status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ? - MOD_HDCP_STATUS_SUCCESS : - MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi + & HDCP_2_2_HDMI_SUPPORT_MASK) + ? 
MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + + if (status == MOD_HDCP_STATUS_SUCCESS) + trace->hdcp2.attempt_count++; + return status; } @@ -201,10 +207,17 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { + struct mod_hdcp_trace *trace = &hdcp->connection.trace; + /* Avoid device count == 0 to do authentication */ if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; + trace->hdcp2.downstream_device_count = get_device_count(hdcp); + trace->hdcp2.hdcp1_device_downstream = + HDCP_2_2_HDCP1_DEVICE_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]); + trace->hdcp2.hdcp2_legacy_device_downstream = + HDCP_2_2_HDCP_2_0_REP_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]); /* Some MST display may choose to report the internal panel as an HDCP RX. */ /* To update this condition with 1(because the immediate repeater's internal */ /* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */ @@ -452,54 +465,11 @@ out: return status; } -static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp, - struct mod_hdcp_event_context *event_ctx, - struct mod_hdcp_transition_input_hdcp2 *input) -{ - enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; - - if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, - &input->lc_init_write, &status, - hdcp, "lc_init_write")) - goto out; - if (is_dp_hdcp(hdcp)) - msleep(16); - else - if (!mod_hdcp_execute_and_set(poll_l_prime_available, - &input->l_prime_available_poll, &status, - hdcp, "l_prime_available_poll")) - goto out; - if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime, - &input->l_prime_read, &status, - hdcp, "l_prime_read")) - goto out; -out: - return status; -} - -static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp, - struct mod_hdcp_event_context *event_ctx, - struct mod_hdcp_transition_input_hdcp2 *input) -{ - enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; - - if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw, - &input->l_prime_read, &status, - hdcp, "l_prime_read")) - goto out; - -out: - return status; -} - static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, struct mod_hdcp_event_context *event_ctx, struct mod_hdcp_transition_input_hdcp2 *input) { enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; - const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c - && hdcp->config.ddc.funcs.atomic_write_poll_read_aux - && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check; if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { event_ctx->unexpected_event = 1; @@ -511,9 +481,28 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, hdcp, "lc_init_prepare")) goto out; - status = (use_fw ? 
locality_check_fw : locality_check_sw)(hdcp, event_ctx, input); - if (status != MOD_HDCP_STATUS_SUCCESS) - goto out; + if (hdcp->connection.link.adjust.hdcp2.use_fw_locality_check) { + if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw, + &input->l_prime_combo_read, &status, + hdcp, "l_prime_combo_read")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, + &input->lc_init_write, &status, + hdcp, "lc_init_write")) + goto out; + if (is_dp_hdcp(hdcp)) + msleep(16); + else + if (!mod_hdcp_execute_and_set(poll_l_prime_available, + &input->l_prime_available_poll, &status, + hdcp, "l_prime_available_poll")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime, + &input->l_prime_read, &status, + hdcp, "l_prime_read")) + goto out; + } if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime, &input->l_prime_validation, &status, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c index 89ffb89e1932..9316312a4df5 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -184,31 +184,33 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); break; - case H2_A2_LOCALITY_CHECK: { - const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c - && !adjust->hdcp2.force_sw_locality_check; - - /* - * 1A-05: consider disconnection after LC init a failure - * 1A-13-1: consider invalid l' a failure - * 1A-13-2: consider l' timeout a failure - */ + case H2_A2_LOCALITY_CHECK: + /* 1A-05: consider disconnection after LC init a failure */ if (hdcp->state.stay_count > 10 || - input->lc_init_prepare != PASS || - (!use_fw && input->lc_init_write != PASS) || - (!use_fw && input->l_prime_available_poll != PASS)) { + input->lc_init_prepare != PASS) { fail_and_restart_in_ms(0, &status, output); break; - } else if (input->l_prime_read != PASS) { - if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) { - adjust->hdcp2.force_sw_locality_check = true; + } else if (adjust->hdcp2.use_fw_locality_check && + input->l_prime_combo_read != PASS) { + /* 1A-13-2: consider l' timeout a failure */ + if (adjust->hdcp2.use_sw_locality_fallback) { + /* switch to software locality check */ + adjust->hdcp2.use_fw_locality_check = 0; callback_in_ms(0, output); + increment_stay_counter(hdcp); break; } - + fail_and_restart_in_ms(0, &status, output); + break; + } else if (!adjust->hdcp2.use_fw_locality_check && + (input->lc_init_write != PASS || + input->l_prime_available_poll != PASS || + input->l_prime_read != PASS)) { + /* 1A-13-2: consider l' timeout a failure */ fail_and_restart_in_ms(0, &status, output); break; } else if (input->l_prime_validation != PASS) { + /* 1A-13-1: consider invalid l' a failure */ callback_in_ms(0, output); increment_stay_counter(hdcp); break; @@ -216,7 +218,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER); break; - } case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: if (input->eks_prepare != PASS || input->eks_write != PASS) { @@ -510,26 +511,29 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); break; - case D2_A2_LOCALITY_CHECK: { - const bool use_fw = 
hdcp->config.ddc.funcs.atomic_write_poll_read_aux - && !adjust->hdcp2.force_sw_locality_check; - + case D2_A2_LOCALITY_CHECK: if (hdcp->state.stay_count > 10 || - input->lc_init_prepare != PASS || - (!use_fw && input->lc_init_write != PASS)) { - /* 1A-12: consider invalid l' a failure */ + input->lc_init_prepare != PASS) { fail_and_restart_in_ms(0, &status, output); break; - } else if (input->l_prime_read != PASS) { - if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) { - adjust->hdcp2.force_sw_locality_check = true; + } else if (adjust->hdcp2.use_fw_locality_check && + input->l_prime_combo_read != PASS) { + if (adjust->hdcp2.use_sw_locality_fallback) { + /* switch to software locality check */ + adjust->hdcp2.use_fw_locality_check = 0; callback_in_ms(0, output); + increment_stay_counter(hdcp); break; } - + fail_and_restart_in_ms(0, &status, output); + break; + } else if (!adjust->hdcp2.use_fw_locality_check && + (input->lc_init_write != PASS || + input->l_prime_read != PASS)) { fail_and_restart_in_ms(0, &status, output); break; } else if (input->l_prime_validation != PASS) { + /* 1A-12: consider invalid l' a failure */ callback_in_ms(0, output); increment_stay_counter(hdcp); break; @@ -537,7 +541,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER); break; - } case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: if (input->eks_prepare != PASS || input->eks_write != PASS) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index 2e6408579194..0ca39873f807 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -758,6 +758,6 @@ enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp) { const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp); - return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; + return success ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c index 6b3b5f610907..5cb979c2cf8c 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -125,129 +125,11 @@ void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp) } } +#define CASE_FORMAT(entry) case entry: return #entry; char *mod_hdcp_status_to_str(int32_t status) { switch (status) { - case MOD_HDCP_STATUS_SUCCESS: - return "MOD_HDCP_STATUS_SUCCESS"; - case MOD_HDCP_STATUS_FAILURE: - return "MOD_HDCP_STATUS_FAILURE"; - case MOD_HDCP_STATUS_RESET_NEEDED: - return "MOD_HDCP_STATUS_RESET_NEEDED"; - case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND: - return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND"; - case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND: - return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND"; - case MOD_HDCP_STATUS_INVALID_STATE: - return "MOD_HDCP_STATUS_INVALID_STATE"; - case MOD_HDCP_STATUS_NOT_IMPLEMENTED: - return "MOD_HDCP_STATUS_NOT_IMPLEMENTED"; - case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE: - return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE"; - case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE: - return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE"; - case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE: - return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE"; - case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE: - return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER: - return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER"; - case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE: - return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE"; - case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING: - return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING"; - case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED: - return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED"; - case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY: - return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY"; - case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED: - return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED"; - case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED: - return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED"; - case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE: - return 
"MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV: - return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV"; - case MOD_HDCP_STATUS_DDC_FAILURE: - return "MOD_HDCP_STATUS_DDC_FAILURE"; - case MOD_HDCP_STATUS_INVALID_OPERATION: - return "MOD_HDCP_STATUS_INVALID_OPERATION"; - case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE: - return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE"; - case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING: - return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING"; - case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING: - return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING"; - case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING: - return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED: - return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING: - return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED: - return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED"; - case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY: - return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY"; - case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING: - return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST: - return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST"; - case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE"; - case MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE: - return "MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE"; + MOD_HDCP_STATUS_LIST(CASE_FORMAT) default: return "MOD_HDCP_STATUS_UNKNOWN"; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index 1d83c1b9da10..26553aa4c5ca 100644 --- 
a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -31,6 +31,7 @@ #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) #define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__) +#define HDCP_LOG_TRA(hdcp) do {} while (0) /* default logs */ #define HDCP_ERROR_TRACE(hdcp, status) \ @@ -131,4 +132,9 @@ HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \ } while (0) +#define HDCP_AUTH_COMPLETE_TRACE(hdcp) do { \ + mod_hdcp_log_ddc_trace(hdcp); \ + HDCP_LOG_TRA(hdcp); \ +} while (0) + #endif // MOD_HDCP_LOG_H_ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index b51ddf2846df..835467225458 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -35,69 +35,74 @@ struct mod_hdcp; #define MAX_NUM_OF_DISPLAYS 6 #define MAX_NUM_OF_ATTEMPTS 4 #define MAX_NUM_OF_ERROR_TRACE 10 +#define MOD_HDCP_STATUS_LIST(FORMAT) \ + FORMAT(MOD_HDCP_STATUS_SUCCESS) \ + FORMAT(MOD_HDCP_STATUS_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_RESET_NEEDED) \ + FORMAT(MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND) \ + FORMAT(MOD_HDCP_STATUS_DISPLAY_NOT_FOUND) \ + FORMAT(MOD_HDCP_STATUS_INVALID_STATE) \ + FORMAT(MOD_HDCP_STATUS_NOT_IMPLEMENTED) \ + FORMAT(MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_INVALID_BKSV) \ + FORMAT(MOD_HDCP_STATUS_DDC_FAILURE) /* TODO: specific errors */ \ + FORMAT(MOD_HDCP_STATUS_INVALID_OPERATION) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE) \ + 
FORMAT(MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE) + +#define ENUM_FORMAT(entry) entry, /* detailed return status */ enum mod_hdcp_status { - MOD_HDCP_STATUS_SUCCESS = 0, - MOD_HDCP_STATUS_FAILURE, - MOD_HDCP_STATUS_RESET_NEEDED, - MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND, - MOD_HDCP_STATUS_DISPLAY_NOT_FOUND, - MOD_HDCP_STATUS_INVALID_STATE, - MOD_HDCP_STATUS_NOT_IMPLEMENTED, - MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE, - MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE, - MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE, - MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE, - MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE, - MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE, - MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER, - MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE, - MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING, - MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE, - MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED, - MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY, - MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE, - MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED, - MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE, - MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE, - MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE, - MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE, - MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED, - MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE, - MOD_HDCP_STATUS_HDCP1_INVALID_BKSV, - MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */ - MOD_HDCP_STATUS_INVALID_OPERATION, - MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE, - MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE, - MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE, - MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE, - MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING, - MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING, - MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING, - MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE, - MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED, - MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE, - MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE, - MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE, - MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING, - MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE, - MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE, - MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY, - MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, - MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED, - MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING, - MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, - 
MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE, - MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST, - MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE, - MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE, - MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE, + MOD_HDCP_STATUS_LIST(ENUM_FORMAT) }; struct mod_hdcp_displayport { @@ -214,8 +219,9 @@ struct mod_hdcp_link_adjustment_hdcp2 { uint8_t force_type : 2; uint8_t force_no_stored_km : 1; uint8_t increase_h_prime_timeout: 1; - uint8_t force_sw_locality_check : 1; - uint8_t reserved : 2; + uint8_t use_fw_locality_check : 1; + uint8_t use_sw_locality_fallback: 1; + uint8_t reserved : 1; }; struct mod_hdcp_link_adjustment { @@ -230,9 +236,23 @@ struct mod_hdcp_error { uint8_t state_id; }; +struct mod_hdcp1_trace { + uint8_t attempt_count; + uint8_t downstream_device_count; +}; + +struct mod_hdcp2_trace { + uint8_t attempt_count; + uint8_t downstream_device_count; + uint8_t hdcp1_device_downstream; + uint8_t hdcp2_legacy_device_downstream; +}; + struct mod_hdcp_trace { struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE]; uint8_t error_count; + struct mod_hdcp1_trace hdcp1; + struct mod_hdcp2_trace hdcp2; }; enum mod_hdcp_encryption_status { @@ -303,10 +323,6 @@ struct mod_hdcp_display_query { struct mod_hdcp_config { struct mod_hdcp_psp psp; struct mod_hdcp_ddc ddc; - struct { - uint8_t lc_enable_sw_fallback : 1; - uint8_t reserved : 7; - } debug; uint8_t index; }; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 29ccd3532d13..fd139b219bf9 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -975,6 +975,34 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, return true; } +void set_replay_frame_skip_number(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t coasting_vtotal_refresh_rate_mhz, + uint32_t flicker_free_refresh_rate_mhz, + bool is_defer) +{ + uint32_t *frame_skip_number_array = NULL; + uint32_t frame_skip_number = 0; + + if (link == NULL || flicker_free_refresh_rate_mhz == 0 || coasting_vtotal_refresh_rate_mhz == 0) + return; + + if (is_defer) + frame_skip_number_array = link->replay_settings.defer_frame_skip_number_table; + else + frame_skip_number_array = link->replay_settings.frame_skip_number_table; + + if (frame_skip_number_array == NULL) + return; + + frame_skip_number = coasting_vtotal_refresh_rate_mhz / flicker_free_refresh_rate_mhz; + + if (frame_skip_number >= 1) + frame_skip_number_array[type] = frame_skip_number - 1; + else + frame_skip_number_array[type] = 0; +} + void set_replay_defer_update_coasting_vtotal(struct dc_link *link, enum replay_coasting_vtotal_type type, uint32_t vtotal) @@ -987,6 +1015,8 @@ void update_replay_coasting_vtotal_from_defer(struct dc_link *link, { link->replay_settings.coasting_vtotal_table[type] = link->replay_settings.defer_update_coasting_vtotal_table[type]; + link->replay_settings.frame_skip_number_table[type] = + link->replay_settings.defer_frame_skip_number_table[type]; } void set_replay_coasting_vtotal(struct dc_link *link, @@ -1007,6 +1037,9 @@ void calculate_replay_link_off_frame_count(struct dc_link *link, uint8_t max_link_off_frame_count = 0; uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0; + if (!link || link->replay_settings.config.replay_version != DC_FREESYNC_REPLAY) + return; + max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; 
pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 391209a3bf29..87d31d9dce5a 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -60,6 +60,11 @@ void set_replay_coasting_vtotal(struct dc_link *link, void set_replay_defer_update_coasting_vtotal(struct dc_link *link, enum replay_coasting_vtotal_type type, uint32_t vtotal); +void set_replay_frame_skip_number(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t coasting_vtotal_refresh_rate_Mhz, + uint32_t flicker_free_refresh_rate_Mhz, + bool is_defer); void update_replay_coasting_vtotal_from_defer(struct dc_link *link, enum replay_coasting_vtotal_type type); void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 75efda2969cf..17945094a138 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -109,6 +109,7 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_VPE, AMD_IP_BLOCK_TYPE_UMSCH_MM, AMD_IP_BLOCK_TYPE_ISP, + AMD_IP_BLOCK_TYPE_RAS, AMD_IP_BLOCK_TYPE_NUM, }; diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h index 2176548e9203..9778822dd2a0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h @@ -60,5 +60,10 @@ #define mmVCE_VCPU_CACHE_SIZE1 0x800C #define mmVCE_VCPU_CACHE_SIZE2 0x800E #define mmVCE_VCPU_CNTL 0x8005 +#define mmVCE_VCPU_SCRATCH7 0x8037 +#define mmVCE_FW_REG_STATUS 0x8384 +#define mmVCE_LMI_FW_PERIODIC_CTRL 0x8388 +#define mmVCE_LMI_FW_START_KEYSEL 0x8386 + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h index ea5b26b11cb1..1f82d6f5abde 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h @@ -61,6 +61,8 @@ #define VCE_RB_WPTR__RB_WPTR__SHIFT 0x00000004 #define VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK 0x00000001L #define VCE_SOFT_RESET__ECPU_SOFT_RESET__SHIFT 0x00000000 +#define VCE_SOFT_RESET__FME_SOFT_RESET_MASK 0x00000004L +#define VCE_SOFT_RESET__FME_SOFT_RESET__SHIFT 0x00000002 #define VCE_STATUS__JOB_BUSY_MASK 0x00000001L #define VCE_STATUS__JOB_BUSY__SHIFT 0x00000000 #define VCE_STATUS__UENC_BUSY_MASK 0x00000100L @@ -95,5 +97,13 @@ #define VCE_VCPU_CNTL__CLK_EN__SHIFT 0x00000000 #define VCE_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00040000L #define VCE_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000012 +#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK 0x00010000 +#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_SHIFT 0x00000010 +#define VCE_FW_REG_STATUS__BUSY_MASK 0x0000001 +#define VCE_FW_REG_STATUS__BUSY__SHIFT 0x0000001 +#define VCE_FW_REG_STATUS__PASS_MASK 0x0000008 +#define VCE_FW_REG_STATUS__PASS__SHIFT 0x0000003 +#define VCE_FW_REG_STATUS__DONE_MASK 0x0000800 +#define VCE_FW_REG_STATUS__DONE__SHIFT 0x000000b #endif diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 2b0cdb2a2775..2366e68262e6 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ 
-454,7 +454,7 @@ struct amd_pm_funcs { bool gate, int inst); int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id); - int (*set_power_limit)(void *handle, uint32_t n); + int (*set_power_limit)(void *handle, uint32_t limit_type, uint32_t n); int (*get_power_limit)(void *handle, uint32_t *limit, enum pp_power_limit_level pp_limit_level, enum pp_power_type power_type); @@ -532,6 +532,110 @@ struct metrics_table_header { uint8_t content_revision; }; +enum amdgpu_metrics_attr_id { + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT, + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MEM, + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_VRSOC, + AMDGPU_METRICS_ATTR_ID_CURR_SOCKET_POWER, + AMDGPU_METRICS_ATTR_ID_AVERAGE_GFX_ACTIVITY, + AMDGPU_METRICS_ATTR_ID_AVERAGE_UMC_ACTIVITY, + AMDGPU_METRICS_ATTR_ID_MEM_MAX_BANDWIDTH, + AMDGPU_METRICS_ATTR_ID_ENERGY_ACCUMULATOR, + AMDGPU_METRICS_ATTR_ID_SYSTEM_CLOCK_COUNTER, + AMDGPU_METRICS_ATTR_ID_ACCUMULATION_COUNTER, + AMDGPU_METRICS_ATTR_ID_PROCHOT_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_PPT_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_SOCKET_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_VR_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_HBM_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_GFXCLK_LOCK_STATUS, + AMDGPU_METRICS_ATTR_ID_PCIE_LINK_WIDTH, + AMDGPU_METRICS_ATTR_ID_PCIE_LINK_SPEED, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_WIDTH, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_SPEED, + AMDGPU_METRICS_ATTR_ID_GFX_ACTIVITY_ACC, + AMDGPU_METRICS_ATTR_ID_MEM_ACTIVITY_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_INST, + AMDGPU_METRICS_ATTR_ID_PCIE_L0_TO_RECOV_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_ROVER_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_NAK_SENT_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_NAK_RCVD_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_READ_DATA_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_WRITE_DATA_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_STATUS, + AMDGPU_METRICS_ATTR_ID_FIRMWARE_TIMESTAMP, + AMDGPU_METRICS_ATTR_ID_CURRENT_GFXCLK, + AMDGPU_METRICS_ATTR_ID_CURRENT_SOCCLK, + AMDGPU_METRICS_ATTR_ID_CURRENT_VCLK0, + AMDGPU_METRICS_ATTR_ID_CURRENT_DCLK0, + AMDGPU_METRICS_ATTR_ID_CURRENT_UCLK, + AMDGPU_METRICS_ATTR_ID_NUM_PARTITION, + AMDGPU_METRICS_ATTR_ID_PCIE_LC_PERF_OTHER_END_RECOVERY, + AMDGPU_METRICS_ATTR_ID_GFX_BUSY_INST, + AMDGPU_METRICS_ATTR_ID_JPEG_BUSY, + AMDGPU_METRICS_ATTR_ID_VCN_BUSY, + AMDGPU_METRICS_ATTR_ID_GFX_BUSY_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_PPT_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC, + AMDGPU_METRICS_ATTR_ID_MAX, +}; + +enum amdgpu_metrics_attr_type { + AMDGPU_METRICS_TYPE_U8, + AMDGPU_METRICS_TYPE_S8, + AMDGPU_METRICS_TYPE_U16, + AMDGPU_METRICS_TYPE_S16, + AMDGPU_METRICS_TYPE_U32, + AMDGPU_METRICS_TYPE_S32, + AMDGPU_METRICS_TYPE_U64, + AMDGPU_METRICS_TYPE_S64, + AMDGPU_METRICS_TYPE_MAX, +}; + +enum amdgpu_metrics_attr_unit { + /* None */ + AMDGPU_METRICS_UNIT_NONE, + /* MHz*/ + AMDGPU_METRICS_UNIT_CLOCK_1, + /* Degree Celsius*/ + AMDGPU_METRICS_UNIT_TEMP_1, + /* Watts*/ + AMDGPU_METRICS_UNIT_POWER_1, + /* In nanoseconds*/ + AMDGPU_METRICS_UNIT_TIME_1, + /* In 10 nanoseconds*/ + AMDGPU_METRICS_UNIT_TIME_2, + /* Speed in GT/s */ + AMDGPU_METRICS_UNIT_SPEED_1, + /* Speed in 0.1 GT/s */ + AMDGPU_METRICS_UNIT_SPEED_2, + /* Bandwidth GB/s */ + AMDGPU_METRICS_UNIT_BW_1, + /* Data in KB */ + AMDGPU_METRICS_UNIT_DATA_1, + /* Percentage */ + 
AMDGPU_METRICS_UNIT_PERCENT, + AMDGPU_METRICS_UNIT_MAX, +}; + +#define AMDGPU_METRICS_ATTR_UNIT_MASK 0xFF000000 +#define AMDGPU_METRICS_ATTR_UNIT_SHIFT 24 +#define AMDGPU_METRICS_ATTR_TYPE_MASK 0x00F00000 +#define AMDGPU_METRICS_ATTR_TYPE_SHIFT 20 +#define AMDGPU_METRICS_ATTR_ID_MASK 0x000FFC00 +#define AMDGPU_METRICS_ATTR_ID_SHIFT 10 +#define AMDGPU_METRICS_ATTR_INST_MASK 0x000003FF +#define AMDGPU_METRICS_ATTR_INST_SHIFT 0 + +#define AMDGPU_METRICS_ENC_ATTR(unit, type, id, inst) \ + (((u64)(unit) << AMDGPU_METRICS_ATTR_UNIT_SHIFT) | \ + ((u64)(type) << AMDGPU_METRICS_ATTR_TYPE_SHIFT) | \ + ((u64)(id) << AMDGPU_METRICS_ATTR_ID_SHIFT) | (inst)) + /* * gpu_metrics_v1_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v1_1 or later instead. @@ -1221,6 +1325,19 @@ struct gpu_metrics_v1_8 { uint32_t pcie_lc_perf_other_end_recovery; }; +struct gpu_metrics_attr { + /* Field type encoded with AMDGPU_METRICS_ENC_ATTR */ + uint64_t attr_encoding; + /* Attribute value, depends on attr_encoding */ + void *attr_value; +}; + +struct gpu_metrics_v1_9 { + struct metrics_table_header common_header; + int attr_count; + struct gpu_metrics_attr metrics_attrs[]; +}; + /* * gpu_metrics_v2_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v2_1 or later instead. @@ -1703,4 +1820,10 @@ struct amdgpu_partition_metrics_v1_0 { uint64_t gfx_below_host_limit_total_acc[MAX_XCC]; }; +struct amdgpu_partition_metrics_v1_1 { + struct metrics_table_header common_header; + int attr_count; + struct gpu_metrics_attr metrics_attrs[]; +}; + #endif diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index ab1cfc92dbeb..f9629d42ada2 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -345,7 +345,8 @@ union MESAPI__REMOVE_QUEUE { uint32_t unmap_kiq_utility_queue : 1; uint32_t preempt_legacy_gfx_queue : 1; uint32_t unmap_legacy_queue : 1; - uint32_t reserved : 28; + uint32_t remove_queue_after_reset : 1; + uint32_t reserved : 27; }; struct MES_API_STATUS api_status; diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h index 69611c7e30e3..2f12cba4eb66 100644 --- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h @@ -399,7 +399,8 @@ union MESAPI__REMOVE_QUEUE { uint32_t unmap_kiq_utility_queue : 1; uint32_t preempt_legacy_gfx_queue : 1; uint32_t unmap_legacy_queue : 1; - uint32_t reserved : 28; + uint32_t remove_queue_after_reset : 1; + uint32_t reserved : 27; }; struct MES_API_STATUS api_status; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 518d07afc7df..79b174e5326d 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -195,24 +195,6 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, return ret; } -int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) -{ - int ret = 0; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (pp_funcs && pp_funcs->notify_rlc_state) { - mutex_lock(&adev->pm.mutex); - - ret = pp_funcs->notify_rlc_state( - adev->powerplay.pp_handle, - en); - - mutex_unlock(&adev->pm.mutex); - } - - return ret; -} - int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1205,8 +1187,11 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) const 
struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret = 0; - if (!pp_funcs->get_pp_table) - return 0; + if (!table) + return -EINVAL; + + if (amdgpu_sriov_vf(adev) || !pp_funcs->get_pp_table || adev->scpm_enabled) + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, @@ -1616,6 +1601,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, } int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, + uint32_t limit_type, uint32_t limit) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1626,7 +1612,7 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, mutex_lock(&adev->pm.mutex); ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, - limit); + limit_type, limit); mutex_unlock(&adev->pm.mutex); return ret; @@ -1732,7 +1718,10 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret = 0; - if (!pp_funcs->set_pp_table) + if (!buf || !size) + return -EINVAL; + + if (amdgpu_sriov_vf(adev) || !pp_funcs->set_pp_table || adev->scpm_enabled) return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); @@ -2139,3 +2128,10 @@ ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id, return ret; } + +const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev) +{ + void *pp_handle = adev->powerplay.pp_handle; + + return smu_get_ras_smu_driver(pp_handle); +} diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index b5fbb0fd1dc0..65296a819e6a 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -108,8 +108,9 @@ const char * const amdgpu_pp_profile_name[] = { static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm) { bool runpm_check = runpm ? adev->in_runpm : false; + bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT); - if (amdgpu_in_reset(adev)) + if (amdgpu_in_reset(adev) || !full_init) return -EBUSY; if (adev->in_suspend && !runpm_check) @@ -173,7 +174,6 @@ static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev) */ static inline void amdgpu_pm_put_access(struct amdgpu_device *adev) { - pm_runtime_mark_last_busy(adev->dev); pm_runtime_put_autosuspend(adev->dev); } @@ -2506,7 +2506,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, .attr_update = pp_dpm_clk_default_attr_update), AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, @@ -2638,6 +2638,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_table)) { + int ret; + char *tmp = NULL; + + ret = amdgpu_dpm_get_pp_table(adev, &tmp); + if (ret == -EOPNOTSUPP || !tmp) + *states = ATTR_STATE_UNSUPPORTED; + else + *states = ATTR_STATE_SUPPORTED; } switch (gc_ver) { @@ -3372,7 +3381,9 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev, to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ? 
"fastPPT" : "slowPPT"); else - return sysfs_emit(buf, "PPT\n"); + return sysfs_emit(buf, "%s\n", + to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ? + "PPT1" : "PPT"); } static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, @@ -3390,13 +3401,12 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, return err; value = value / 1000000; /* convert to Watt */ - value |= limit_type << 24; err = amdgpu_pm_get_access(adev); if (err < 0) return err; - err = amdgpu_dpm_set_power_limit(adev, value); + err = amdgpu_dpm_set_power_limit(adev, limit_type, value); amdgpu_pm_put_access(adev); @@ -3578,7 +3588,6 @@ static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_m static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0); static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0); -static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1); @@ -3627,7 +3636,6 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_power1_cap.dev_attr.attr, &sensor_dev_attr_power1_cap_default.dev_attr.attr, &sensor_dev_attr_power1_label.dev_attr.attr, - &sensor_dev_attr_power2_average.dev_attr.attr, &sensor_dev_attr_power2_cap_max.dev_attr.attr, &sensor_dev_attr_power2_cap_min.dev_attr.attr, &sensor_dev_attr_power2_cap.dev_attr.attr, @@ -3826,13 +3834,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; /* only Vangogh has fast PPT limit and power labels */ - if (!(gc_ver == IP_VERSION(10, 3, 1)) && - (attr == &sensor_dev_attr_power2_average.dev_attr.attr || - attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr || + if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr || attr == &sensor_dev_attr_power2_cap.dev_attr.attr || attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr || - attr == &sensor_dev_attr_power2_label.dev_attr.attr)) + attr == &sensor_dev_attr_power2_label.dev_attr.attr) && + (amdgpu_dpm_get_power_limit(adev, &tmp, + PP_PWR_LIMIT_MAX, + PP_PWR_TYPE_FAST) == -EOPNOTSUPP)) return 0; return effective_mode; @@ -4724,14 +4733,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) ret = devm_device_add_group(adev->dev, &amdgpu_pm_policy_attr_group); if (ret) - goto err_out0; + goto err_out1; } if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) { ret = devm_device_add_group(adev->dev, &amdgpu_board_attr_group); if (ret) - goto err_out0; + goto err_out1; if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, (void *)&tmp) != -EOPNOTSUPP) { sysfs_add_file_to_group(&adev->dev->kobj, diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 65c1d98af26c..aa3f427819a0 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -424,8 +424,6 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev); int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, enum pp_mp1_state mp1_state); -int 
amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en); - int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev); int amdgpu_dpm_baco_exit(struct amdgpu_device *adev); @@ -553,7 +551,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, enum pp_power_limit_level pp_limit_level, enum pp_power_type power_type); int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, - uint32_t limit); + uint32_t limit_type, uint32_t limit); int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev); int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, struct seq_file *m); @@ -614,5 +612,6 @@ int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask); bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev); bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev, enum smu_temp_metric_type type); +const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 3a9522c17fee..1f539cc65f41 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -2558,18 +2558,13 @@ static int si_enable_power_containment(struct amdgpu_device *adev, if (enable) { if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive); - if (smc_result != PPSMC_Result_OK) { + if (smc_result != PPSMC_Result_OK) ret = -EINVAL; - ni_pi->pc_enabled = false; - } else { - ni_pi->pc_enabled = true; - } } } else { smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive); if (smc_result != PPSMC_Result_OK) ret = -EINVAL; - ni_pi->pc_enabled = false; } } @@ -7051,13 +7046,20 @@ static void si_set_vce_clock(struct amdgpu_device *adev, if ((old_rps->evclk != new_rps->evclk) || (old_rps->ecclk != new_rps->ecclk)) { /* Turn the clocks on when encoding, off otherwise */ + dev_dbg(adev->dev, "set VCE clocks: %u, %u\n", new_rps->evclk, new_rps->ecclk); + if (new_rps->evclk || new_rps->ecclk) { - /* Place holder for future VCE1.0 porting to amdgpu - vce_v1_0_enable_mgcg(adev, false, false);*/ + amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk); + amdgpu_device_ip_set_clockgating_state( + adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state( + adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE); } else { - /* Place holder for future VCE1.0 porting to amdgpu - vce_v1_0_enable_mgcg(adev, true, false); - amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);*/ + amdgpu_device_ip_set_powergating_state( + adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state( + adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); + amdgpu_asic_set_vce_clocks(adev, 0, 0); } } } @@ -7509,8 +7511,6 @@ static int si_dpm_init(struct amdgpu_device *adev) pi->pasi = CYPRESS_HASI_DFLT; pi->vrc = SISLANDS_VRC_DFLT; - pi->gfx_clock_gating = true; - eg_pi->sclk_deep_sleep = true; si_pi->sclk_deep_sleep_above_low = false; @@ -7521,7 +7521,6 @@ static int si_dpm_init(struct amdgpu_device *adev) eg_pi->dynamic_ac_timing = true; - eg_pi->light_sleep = true; #if defined(CONFIG_ACPI) eg_pi->pcie_performance_request = amdgpu_acpi_is_pcie_performance_request_supported(adev); @@ -7582,6 +7581,7 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle, } else { pl = &ps->performance_levels[current_index]; 
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + seq_printf(m, "vce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk); seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h index 11cb7874a6bb..3aed75fbf913 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h @@ -38,11 +38,7 @@ #define MC_ARB_DRAM_TIMING2_2 0xa00 #define MC_ARB_DRAM_TIMING2_3 0xa01 -#define MAX_NO_OF_MVDD_VALUES 2 -#define MAX_NO_VREG_STEPS 32 #define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 -#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 #define RV770_ASI_DFLT 1000 #define CYPRESS_HASI_DFLT 400000 #define PCIE_PERF_REQ_PECI_GEN1 2 @@ -51,11 +47,6 @@ #define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */ #define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */ -#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16 - -#define RV770_SMC_TABLE_ADDRESS 0xB000 -#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3 - #define SMC_STROBE_RATIO 0x0F #define SMC_STROBE_ENABLE 0x10 @@ -64,27 +55,6 @@ #define SMC_MC_RTT_ENABLE 0x04 #define SMC_MC_STUTTER_EN 0x08 -#define RV770_SMC_VOLTAGEMASK_VDDC 0 -#define RV770_SMC_VOLTAGEMASK_MVDD 1 -#define RV770_SMC_VOLTAGEMASK_VDDCI 2 -#define RV770_SMC_VOLTAGEMASK_MAX 4 - -#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -#define NISLANDS_SMC_STROBE_RATIO 0x0F -#define NISLANDS_SMC_STROBE_ENABLE 0x10 - -#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01 -#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02 -#define NISLANDS_SMC_MC_RTT_ENABLE 0x04 -#define NISLANDS_SMC_MC_STUTTER_EN 0x08 - -#define MAX_NO_VREG_STEPS 32 - -#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0 -#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1 -#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2 -#define NISLANDS_SMC_VOLTAGEMASK_MAX 4 - #define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0 #define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1 #define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2 @@ -219,32 +189,6 @@ enum si_cac_config_reg_type SISLANDS_CACCONFIG_MAX }; -enum si_power_level { - SI_POWER_LEVEL_LOW = 0, - SI_POWER_LEVEL_MEDIUM = 1, - SI_POWER_LEVEL_HIGH = 2, - SI_POWER_LEVEL_CTXSW = 3, -}; - -enum si_td { - SI_TD_AUTO, - SI_TD_UP, - SI_TD_DOWN, -}; - -enum si_display_watermark { - SI_DISPLAY_WATERMARK_LOW = 0, - SI_DISPLAY_WATERMARK_HIGH = 1, -}; - -enum si_display_gap -{ - SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, - SI_PM_DISPLAY_GAP_VBLANK = 1, - SI_PM_DISPLAY_GAP_WATERMARK = 2, - SI_PM_DISPLAY_GAP_IGNORE = 3, -}; - extern const struct amdgpu_ip_block_version si_smu_ip_block; struct ni_leakage_coeffients @@ -258,56 +202,6 @@ struct ni_leakage_coeffients u32 t_ref; }; -struct SMC_Evergreen_MCRegisterAddress -{ - uint16_t s0; - uint16_t s1; -}; - -typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress; - -struct evergreen_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -struct evergreen_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -struct SMC_Evergreen_MCRegisterSet -{ - uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet; - -struct 
SMC_Evergreen_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; - SMC_Evergreen_MCRegisterSet data[5]; -}; - -typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; - -struct SMC_NIslands_MCRegisterSet -{ - uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; - -struct ni_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - struct SMC_NIslands_MCRegisterAddress { uint16_t s0; @@ -316,257 +210,20 @@ struct SMC_NIslands_MCRegisterAddress typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; -struct SMC_NIslands_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; - SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; -}; - -typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; - -struct evergreen_ulv_param { - bool supported; - struct rv7xx_pl *pl; -}; - -struct evergreen_arb_registers { - u32 mc_arb_dram_timing; - u32 mc_arb_dram_timing2; - u32 mc_arb_rfsh_rate; - u32 mc_arb_burst_time; -}; - -struct at { - u32 rlp; - u32 rmp; - u32 lhp; - u32 lmp; -}; - -struct ni_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_ad_func_cntl; - u32 mpll_ad_func_cntl_2; - u32 mpll_dq_func_cntl; - u32 mpll_dq_func_cntl_2; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct RV770_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE; - -struct RV770_SMC_MCLK_VALUE -{ - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL_2; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL_2; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE; - - -struct RV730_SMC_MCLK_VALUE -{ - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL2; - uint32_t vMPLL_FUNC_CNTL3; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE; - -struct RV770_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t padding; -}; - -typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE; - -union RV7XX_SMC_MCLK_VALUE -{ - RV770_SMC_MCLK_VALUE mclk770; - RV730_SMC_MCLK_VALUE mclk730; -}; - -typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE; - -struct RV770_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t arbValue; - union{ - uint8_t seqValue; - uint8_t ACIndex; - }; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t gen2XSP; - uint8_t backbias; - uint8_t strobeMode; - uint8_t mcFlags; - uint32_t aT; - uint32_t bSP; - RV770_SMC_SCLK_VALUE sclk; - RV7XX_SMC_MCLK_VALUE mclk; - RV770_SMC_VOLTAGE_VALUE vddc; - RV770_SMC_VOLTAGE_VALUE mvdd; - RV770_SMC_VOLTAGE_VALUE vddci; - uint8_t reserved1; - uint8_t reserved2; - uint8_t stateFlags; - uint8_t padding; -}; - -typedef struct 
RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL; - -struct RV770_SMC_SWSTATE -{ - uint8_t flags; - uint8_t padding1; - uint8_t padding2; - uint8_t padding3; - RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; -}; - -typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE; - -struct RV770_SMC_VOLTAGEMASKTABLE -{ - uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX]; - uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE; - -struct RV770_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint8_t highSMIO[MAX_NO_VREG_STEPS]; - uint32_t lowSMIO[MAX_NO_VREG_STEPS]; - RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable; - RV770_SMC_SWSTATE initialState; - RV770_SMC_SWSTATE ACPIState; - RV770_SMC_SWSTATE driverState; - RV770_SMC_SWSTATE ULVState; -}; - -typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; - -struct vddc_table_entry { - u16 vddc; - u8 vddc_index; - u8 high_smio; - u32 low_smio; -}; - -struct rv770_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mpll_ad_func_cntl; - u32 mpll_ad_func_cntl_2; - u32 mpll_dq_func_cntl; - u32 mpll_dq_func_cntl_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct rv730_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_func_cntl; - u32 mpll_func_cntl2; - u32 mpll_func_cntl3; - u32 mpll_ss; - u32 mpll_ss2; -}; - -union r7xx_clock_registers { - struct rv770_clock_registers rv770; - struct rv730_clock_registers rv730; -}; - struct rv7xx_power_info { /* flags */ - bool mem_gddr5; - bool pcie_gen2; - bool dynamic_pcie_gen2; - bool acpi_pcie_gen2; - bool boot_in_gen2; bool voltage_control; /* vddc */ bool mvdd_control; bool sclk_ss; bool mclk_ss; bool dynamic_ss; - bool gfx_clock_gating; - bool mg_clock_gating; - bool mgcgtssm; - bool power_gating; bool thermal_protection; - bool display_gap; - bool dcodt; - bool ulps; - /* registers */ - union r7xx_clock_registers clk_regs; - u32 s0_vid_lower_smio_cntl; /* voltage */ - u32 vddc_mask_low; - u32 mvdd_mask_low; u32 mvdd_split_frequency; - u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES]; u16 max_vddc; u16 max_vddc_in_table; u16 min_vddc_in_table; - struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS]; - u8 valid_vddc_entries; - /* dc odt */ - u32 mclk_odt_threshold; - u8 odt_value_0[2]; - u8 odt_value_1[2]; /* stored values */ - u32 boot_sclk; u16 acpi_vddc; u32 ref_div; u32 active_auto_throttle_sources; @@ -582,17 +239,6 @@ struct rv7xx_power_info { u32 asi; u32 pasi; u32 vrc; - u32 restricted_levels; - u32 rlp; - u32 rmp; - u32 lhp; - u32 lmp; - /* smc offsets */ - u16 state_table_start; - u16 soft_regs_start; - u16 sram_end; - /* scratch structs */ - RV770_SMC_STATETABLE smc_statetable; }; enum si_pcie_gen { @@ -611,44 +257,12 @@ struct rv7xx_pl { enum si_pcie_gen pcie_gen; /* si+ only */ }; -struct rv7xx_ps { - struct rv7xx_pl high; - struct rv7xx_pl medium; - struct rv7xx_pl low; - bool dc_compatible; -}; - struct si_ps { u16 performance_level_count; bool dc_compatible; struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; }; -struct ni_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - 
struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct ni_cac_data -{ - struct ni_leakage_coeffients leakage_coefficients; - u32 i_leakage; - s32 leakage_minimum_temperature; - u32 pwr_const; - u32 dc_cac_value; - u32 bif_cac_value; - u32 lkge_pwr; - u8 mc_wr_weight; - u8 mc_rd_weight; - u8 allow_ovrflw; - u8 num_win_tdp; - u8 l2num_win_tdp; - u8 lts_truncate_n; -}; - struct evergreen_power_info { /* must be first! */ struct rv7xx_power_info rv7xx; @@ -657,203 +271,33 @@ struct evergreen_power_info { bool dynamic_ac_timing; bool abm; bool mcls; - bool light_sleep; - bool memory_transition; bool pcie_performance_request; - bool pcie_performance_request_registered; bool sclk_deep_sleep; - bool dll_default_on; - bool ls_clock_gating; bool smu_uvd_hs; bool uvd_enabled; /* stored values */ u16 acpi_vddci; - u8 mvdd_high_index; - u8 mvdd_low_index; u32 mclk_edc_wr_enable_threshold; - struct evergreen_mc_reg_table mc_reg_table; struct atom_voltage_table vddc_voltage_table; struct atom_voltage_table vddci_voltage_table; - struct evergreen_arb_registers bootup_arb_registers; - struct evergreen_ulv_param ulv; - struct at ats[2]; - /* smc offsets */ - u16 mc_reg_table_start; struct amdgpu_ps current_rps; - struct rv7xx_ps current_ps; struct amdgpu_ps requested_rps; - struct rv7xx_ps requested_ps; -}; - -struct PP_NIslands_Dpm2PerfLevel -{ - uint8_t MaxPS; - uint8_t TgtAct; - uint8_t MaxPS_StepInc; - uint8_t MaxPS_StepDec; - uint8_t PSST; - uint8_t NearTDPDec; - uint8_t AboveSafeInc; - uint8_t BelowSafeInc; - uint8_t PSDeltaLimit; - uint8_t PSDeltaWin; - uint8_t Reserved[6]; -}; - -typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel; - -struct PP_NIslands_DPM2Parameters -{ - uint32_t TDPLimit; - uint32_t NearTDPLimit; - uint32_t SafePowerLimit; - uint32_t PowerBoostLimit; -}; -typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters; - -struct NISLANDS_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE; - -struct NISLANDS_SMC_MCLK_VALUE -{ - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL_2; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL_2; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE; - -struct NISLANDS_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t padding; -}; - -typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE; - -struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t arbValue; - uint8_t ACIndex; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t reserved1; - uint8_t reserved2; - uint8_t strobeMode; - uint8_t mcFlags; - uint32_t aT; - uint32_t bSP; - NISLANDS_SMC_SCLK_VALUE sclk; - NISLANDS_SMC_MCLK_VALUE mclk; - NISLANDS_SMC_VOLTAGE_VALUE vddc; - NISLANDS_SMC_VOLTAGE_VALUE mvdd; - NISLANDS_SMC_VOLTAGE_VALUE vddci; - NISLANDS_SMC_VOLTAGE_VALUE std_vddc; - uint32_t powergate_en; - uint8_t hUp; - uint8_t hDown; - uint8_t stateFlags; - uint8_t arbRefreshState; - uint32_t SQPowerThrottle; - uint32_t 
SQPowerThrottle_2; - uint32_t reserved[2]; - PP_NIslands_Dpm2PerfLevel dpm2; -}; - -typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL; - -struct NISLANDS_SMC_SWSTATE -{ - uint8_t flags; - uint8_t levelCount; - uint8_t padding2; - uint8_t padding3; - NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[]; -}; - -typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE; - -struct NISLANDS_SMC_VOLTAGEMASKTABLE -{ - uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; - uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE; - -#define NISLANDS_MAX_NO_VREG_STEPS 32 - -struct NISLANDS_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS]; - uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS]; - NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; - PP_NIslands_DPM2Parameters dpm2Params; - NISLANDS_SMC_SWSTATE initialState; - NISLANDS_SMC_SWSTATE ACPIState; - NISLANDS_SMC_SWSTATE ULVState; - NISLANDS_SMC_SWSTATE driverState; - NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; }; -typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE; - struct ni_power_info { /* must be first! */ struct evergreen_power_info eg; - struct ni_clock_registers clock_registers; - struct ni_mc_reg_table mc_reg_table; u32 mclk_rtt_mode_threshold; /* flags */ - bool use_power_boost_limit; bool support_cac_long_term_average; bool cac_enabled; bool cac_configuration_required; bool driver_calculate_cac_leakage; - bool pc_enabled; bool enable_power_containment; bool enable_cac; bool enable_sq_ramping; - /* smc offsets */ - u16 arb_table_start; - u16 fan_table_start; - u16 cac_table_start; - u16 spll_table_start; - /* CAC stuff */ - struct ni_cac_data cac_data; - u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS]; - const struct ni_cac_weights *cac_weights; - u8 lta_window_size; - u8 lts_truncate; struct si_ps current_ps; struct si_ps requested_ps; - /* scratch structs */ - SMC_NIslands_MCRegisters smc_mc_reg_table; - NISLANDS_SMC_STATETABLE smc_statetable; }; struct si_cac_config_reg @@ -952,7 +396,6 @@ struct si_leakage_voltage struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT]; }; - struct si_ulv_param { bool supported; u32 cg_ulv_control; diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 554492dfa3c0..3aaf3dd71868 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -20,7 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ -#include "pp_debug.h" #include <linux/types.h> #include <linux/kernel.h> #include <linux/gfp.h> @@ -28,12 +27,10 @@ #include <linux/firmware.h> #include <linux/reboot.h> #include "amd_shared.h" -#include "amd_powerplay.h" #include "power_state.h" #include "amdgpu.h" #include "hwmgr.h" #include "amdgpu_dpm_internal.h" -#include "amdgpu_display.h" static const struct amd_pm_funcs pp_dpm_funcs; @@ -634,9 +631,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table) { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table) + if (!hwmgr || !hwmgr->pm_en || !table) return -EINVAL; + if (!hwmgr->soft_pp_table) + return -EOPNOTSUPP; + *table = (char *)hwmgr->soft_pp_table; return hwmgr->soft_pp_table_size; } @@ -955,7 +955,7 @@ static int pp_dpm_switch_power_profile(void *handle, return 0; } -static int pp_set_power_limit(void *handle, uint32_t limit) +static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) { struct pp_hwmgr *hwmgr = handle; uint32_t max_power_limit; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c index ac9ec8257f82..38e19e5cad4d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c @@ -139,7 +139,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id, NULL); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -164,7 +164,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c index f9c0f117725d..0bf1bf5528c2 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c @@ -60,7 +60,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id, NULL); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -90,7 +90,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c index d3ff6a831ed5..e2ba593faa5d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c @@ -68,7 +68,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", return -EINVAL); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -98,7 +98,7 @@ static int 
vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c index a5c95b180672..e3515156d26f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c @@ -192,7 +192,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", return ret); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -223,7 +223,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, @@ -256,7 +256,7 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, @@ -306,7 +306,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!", return ret); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index fb8086859857..f51fa265230b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -68,7 +68,7 @@ static int smu_handle_task(struct smu_context *smu, static int smu_reset(struct smu_context *smu); static int smu_set_fan_speed_pwm(void *handle, u32 speed); static int smu_set_fan_control_mode(void *handle, u32 value); -static int smu_set_power_limit(void *handle, uint32_t limit); +static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit); static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); @@ -508,11 +508,14 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) /* Enable restore flag */ smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; - /* set the user dpm power limit */ - if (smu->user_dpm_profile.power_limit) { - ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); + /* set the user dpm power limits */ + for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) { + if (!smu->user_dpm_profile.power_limits[i]) + continue; + ret = smu_set_power_limit(smu, i, + smu->user_dpm_profile.power_limits[i]); if (ret) - dev_err(smu->adev->dev, "Failed to set power limit value\n"); + dev_err(smu->adev->dev, "Failed to set %d 
power limit value\n", i); } /* set the user dpm clock configurations */ @@ -609,6 +612,17 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev) return true; } +int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg, + uint32_t param, uint32_t *read_arg) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int ret = -EOPNOTSUPP; + + if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg) + ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg); + + return ret; +} static int smu_sys_get_pp_table(void *handle, char **table) @@ -620,7 +634,7 @@ static int smu_sys_get_pp_table(void *handle, return -EOPNOTSUPP; if (!smu_table->power_play_table && !smu_table->hardcode_pptable) - return -EINVAL; + return -EOPNOTSUPP; if (smu_table->hardcode_pptable) *table = smu_table->hardcode_pptable; @@ -1655,9 +1669,12 @@ static int smu_smc_hw_setup(struct smu_context *smu) if (adev->in_suspend && smu_is_dpm_running(smu)) { dev_info(adev->dev, "dpm has been enabled\n"); ret = smu_system_features_control(smu, true); - if (ret) + if (ret) { dev_err(adev->dev, "Failed system features control!\n"); - return ret; + return ret; + } + + return smu_enable_thermal_alert(smu); } break; default: @@ -2040,6 +2057,12 @@ static int smu_disable_dpms(struct smu_context *smu) smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix)) return 0; + /* vangogh s0ix */ + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) && + adev->in_s0ix) + return 0; + /* * For gpu reset, runpm and hibernation through BACO, * BACO feature has to be kept enabled. @@ -2225,7 +2248,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) int ret; struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (amdgpu_sriov_multi_vf_mode(adev)) return 0; @@ -2257,18 +2279,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) adev->pm.dpm_enabled = true; - if (smu->current_power_limit) { - ret = smu_set_power_limit(smu, smu->current_power_limit); - if (ret && ret != -EOPNOTSUPP) - return ret; - } - - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) { - ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); - if (ret) - return ret; - } - dev_info(adev->dev, "SMU is resumed successfully!\n"); return 0; @@ -2796,6 +2806,17 @@ const struct amdgpu_ip_block_version smu_v14_0_ip_block = { .funcs = &smu_ip_funcs, }; +const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle) +{ + struct smu_context *smu = (struct smu_context *)handle; + const struct ras_smu_drv *tmp = NULL; + int ret; + + ret = smu_get_ras_smu_drv(smu, &tmp); + + return ret ? 
NULL : tmp; +} + static int smu_load_microcode(void *handle) { struct smu_context *smu = handle; @@ -2889,6 +2910,9 @@ int smu_get_power_limit(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; + if (!limit) + return -EINVAL; + switch (pp_power_type) { case PP_PWR_TYPE_SUSTAINED: limit_type = SMU_DEFAULT_PPT_LIMIT; @@ -2920,6 +2944,8 @@ int smu_get_power_limit(void *handle, if (limit_type != SMU_DEFAULT_PPT_LIMIT) { if (smu->ppt_funcs->get_ppt_limit) ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); + else + return -EOPNOTSUPP; } else { switch (limit_level) { case SMU_PPT_LIMIT_CURRENT: @@ -2958,37 +2984,34 @@ int smu_get_power_limit(void *handle, return ret; } -static int smu_set_power_limit(void *handle, uint32_t limit) +static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) { struct smu_context *smu = handle; - uint32_t limit_type = limit >> 24; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - limit &= (1<<24)-1; - if (limit_type != SMU_DEFAULT_PPT_LIMIT) - if (smu->ppt_funcs->set_power_limit) - return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - - if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { - dev_err(smu->adev->dev, - "New power limit (%d) is out of range [%d,%d]\n", - limit, smu->min_power_limit, smu->max_power_limit); - return -EINVAL; + if (limit_type == SMU_DEFAULT_PPT_LIMIT) { + if (!limit) + limit = smu->current_power_limit; + if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { + dev_err(smu->adev->dev, + "New power limit (%d) is out of range [%d,%d]\n", + limit, smu->min_power_limit, smu->max_power_limit); + return -EINVAL; + } } - if (!limit) - limit = smu->current_power_limit; - if (smu->ppt_funcs->set_power_limit) { ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) - smu->user_dpm_profile.power_limit = limit; + if (ret) + return ret; + if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) + smu->user_dpm_profile.power_limits[limit_type] = limit; } - return ret; + return 0; } static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 582c186d8b62..8815fc70b63b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -212,6 +212,7 @@ enum smu_power_src_type { enum smu_ppt_limit_type { SMU_DEFAULT_PPT_LIMIT = 0, SMU_FAST_PPT_LIMIT, + SMU_LIMIT_TYPE_COUNT, }; enum smu_ppt_limit_level { @@ -231,7 +232,7 @@ enum smu_memory_pool_size { struct smu_user_dpm_profile { uint32_t fan_mode; - uint32_t power_limit; + uint32_t power_limits[SMU_LIMIT_TYPE_COUNT]; uint32_t fan_speed_pwm; uint32_t fan_speed_rpm; uint32_t flags; @@ -1521,6 +1522,21 @@ struct pptable_funcs { */ ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id, void *table); + /** + * @ras_send_msg: Send a message with a parameter from Ras + * &msg: Type of message. + * &param: Message parameter. + * &read_arg: SMU response (optional). 
+ */ + int (*ras_send_msg)(struct smu_context *smu, + enum smu_message_type msg, uint32_t param, uint32_t *read_arg); + + + /** + * @get_ras_smu_drv: Get RAS smu driver interface + * Return: ras_smu_drv * + */ + int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv); }; typedef enum { @@ -1785,7 +1801,10 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, int level); ssize_t smu_get_pm_policy_info(struct smu_context *smu, enum pp_pm_policy p_type, char *sysbuf); +const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle); +int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg, + uint32_t param, uint32_t *readarg); #endif void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index bf6aa9620911..dd30d96e1ca2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -87,7 +87,7 @@ typedef enum { /*37*/ FEATURE_DVO = 37, /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, /*39*/ FEATURE_GLOBAL_DPM = 39, -/*40*/ FEATURE_NODE_POWER_MANAGER = 40, +/*40*/ FEATURE_HROM_EN = 40, /*41*/ NUM_FEATURES = 41 } FEATURE_LIST_e; @@ -189,7 +189,7 @@ typedef enum { SVI_MAX_TEMP_ENTRIES, // 13 } SVI_TEMP_e; -#define SMU_METRICS_TABLE_VERSION 0x14 +#define SMU_METRICS_TABLE_VERSION 0x15 #define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1 @@ -367,6 +367,11 @@ typedef struct { //Node Power Limit uint32_t MaxNodePowerLimit; + + // PPT1 Configuration + uint32_t PPT1Max; + uint32_t PPT1Min; + uint32_t PPT1Default; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h index 4b066c42e0ec..d09b6ae9827e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h @@ -105,23 +105,21 @@ #define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C #define PPSMC_MSG_ResetSDMA 0x4D #define PPSMC_MSG_GetRasTableVersion 0x4E -#define PPSMC_MSG_GetRmaStatus 0x4F -#define PPSMC_MSG_GetErrorCount 0x50 -#define PPSMC_MSG_GetBadPageCount 0x51 -#define PPSMC_MSG_GetBadPageInfo 0x52 -#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53 -#define PPSMC_MSG_SetTimestampLoHi 0x54 -#define PPSMC_MSG_GetTimestampLoHi 0x55 -#define PPSMC_MSG_GetRasPolicy 0x56 -#define PPSMC_MSG_DumpErrorRecord 0x57 +#define PPSMC_MSG_GetBadPageCount 0x50 +#define PPSMC_MSG_GetBadPageMcaAddress 0x51 +#define PPSMC_MSG_SetTimestamp 0x53 +#define PPSMC_MSG_SetTimestampHi 0x54 +#define PPSMC_MSG_GetTimestamp 0x55 +#define PPSMC_MSG_GetBadPageIpIdLoHi 0x57 #define PPSMC_MSG_EraseRasTable 0x58 #define PPSMC_MSG_GetStaticMetricsTable 0x59 #define PPSMC_MSG_ResetVfArbitersByIndex 0x5A -#define PPSMC_MSG_GetBadPageSeverity 0x5B #define PPSMC_MSG_GetSystemMetricsTable 0x5C #define PPSMC_MSG_GetSystemMetricsVersion 0x5D #define PPSMC_MSG_ResetVCN 0x5E -#define PPSMC_Message_Count 0x5F +#define PPSMC_MSG_SetFastPptLimit 0x5F +#define PPSMC_MSG_GetFastPptLimit 0x60 +#define PPSMC_Message_Count 0x61 //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 2256c77da636..9b71a8afdd35 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -279,7 +279,16 @@ __SMU_DUMMY_MAP(ResetSDMA), \ __SMU_DUMMY_MAP(ResetVCN), \ __SMU_DUMMY_MAP(GetStaticMetricsTable), \ - __SMU_DUMMY_MAP(GetSystemMetricsTable), + __SMU_DUMMY_MAP(GetSystemMetricsTable), \ + __SMU_DUMMY_MAP(GetRASTableVersion), \ + __SMU_DUMMY_MAP(GetBadPageCount), \ + __SMU_DUMMY_MAP(GetBadPageMcaAddr), \ + __SMU_DUMMY_MAP(SetTimestamp), \ + __SMU_DUMMY_MAP(GetTimestamp), \ + __SMU_DUMMY_MAP(GetBadPageIpid), \ + __SMU_DUMMY_MAP(EraseRasTable), \ + __SMU_DUMMY_MAP(SetFastPptLimit), \ + __SMU_DUMMY_MAP(GetFastPptLimit), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -458,7 +467,8 @@ enum smu_clk_type { __SMU_DUMMY_MAP(GFX_EDC_XVMIN), \ __SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \ __SMU_DUMMY_MAP(FAN_ABNORMAL), \ - __SMU_DUMMY_MAP(PIT), + __SMU_DUMMY_MAP(PIT), \ + __SMU_DUMMY_MAP(HROM_EN), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 9548bd3c624b..55401e6b2b0b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -291,11 +291,12 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int ret = 0, size = 0; + int ret = 0, size = 0, start_offset = 0; uint32_t cur_value = 0; int i; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -353,7 +354,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, return ret; } - return size; + return size - start_offset; } static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 0028f10ead42..7c9f77124ab2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1469,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, levels, size = 0, ret = 0; + int i, levels, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1484,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_GFXCLK: @@ -1497,11 +1498,11 @@ static int navi10_print_clk_levels(struct smu_context *smu, case SMU_DCEFCLK: ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value); if (ret) - return size; + return size - start_offset; ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count); if (ret) - return size; + return size - start_offset; ret = navi10_is_support_fine_grained_dpm(smu, clk_type); if (ret < 0) @@ -1511,7 +1512,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, for (i = 0; i < count; i++) { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); if (ret) - return size; + return size - start_offset; size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, cur_value == value ? 
"*" : ""); @@ -1519,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu, } else { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); if (ret) - return size; + return size - start_offset; ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); if (ret) - return size; + return size - start_offset; freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : @@ -1653,7 +1654,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int navi10_force_clk_levels(struct smu_context *smu, @@ -2888,7 +2889,7 @@ static int navi10_set_dummy_pstates_table_location(struct smu_context *smu) dummy_table += 0x1000; } - amdgpu_asic_flush_hdp(smu->adev, NULL); + amdgpu_hdp_flush(smu->adev, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 31c2c0386b1f..774283ac7827 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1281,7 +1281,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings; OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; - int i, size = 0, ret = 0; + int i, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1289,6 +1289,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_GFXCLK: @@ -1434,7 +1435,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } static int sienna_cichlid_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 78e4186d06cc..b0d6487171d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1022,7 +1022,12 @@ int smu_v11_0_enable_thermal_alert(struct smu_context *smu) int smu_v11_0_disable_thermal_alert(struct smu_context *smu) { - return amdgpu_irq_put(smu->adev, &smu->irq_source, 0); + int ret = 0; + + if (smu->smu_table.thermal_controller_type) + ret = amdgpu_irq_put(smu->adev, &smu->irq_source, 0); + + return ret; } static uint16_t convert_to_vddc(uint8_t vid) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 2c9869feba61..9626da2dba58 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -565,7 +565,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -576,6 +576,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { 
case SMU_OD_SCLK: @@ -658,7 +659,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int vangogh_print_clk_levels(struct smu_context *smu, @@ -666,7 +667,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -678,6 +679,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -779,7 +781,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int vangogh_common_print_clk_levels(struct smu_context *smu, @@ -2217,6 +2219,9 @@ static int vangogh_post_smu_init(struct smu_context *smu) uint32_t total_cu = adev->gfx.config.max_cu_per_sh * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; + if (adev->in_s0ix) + return 0; + /* allow message will be sent after enable message on Vangogh*/ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { @@ -2308,8 +2313,7 @@ static int vangogh_get_power_limit(struct smu_context *smu, uint32_t *max_power_limit, uint32_t *min_power_limit) { - struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; + struct smu_11_5_power_context *power_context = smu->smu_power.power_context; uint32_t ppt_limit; int ret = 0; @@ -2345,12 +2349,11 @@ static int vangogh_get_power_limit(struct smu_context *smu, } static int vangogh_get_ppt_limit(struct smu_context *smu, - uint32_t *ppt_limit, - enum smu_ppt_limit_type type, - enum smu_ppt_limit_level level) + uint32_t *ppt_limit, + enum smu_ppt_limit_type type, + enum smu_ppt_limit_level level) { - struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; + struct smu_11_5_power_context *power_context = smu->smu_power.power_context; if (!power_context) return -EOPNOTSUPP; @@ -2399,7 +2402,6 @@ static int vangogh_set_power_limit(struct smu_context *smu, smu->current_power_limit = ppt_limit; break; case SMU_FAST_PPT_LIMIT: - ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); if (ppt_limit > power_context->max_fast_ppt_limit) { dev_err(smu->adev->dev, "New power limit (%d) is over the max allowed %d\n", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 3baf20f4c373..eaa9ea162f16 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; bool cur_value_match_level = false; @@ -506,6 +506,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_RANGE: @@ -550,7 +551,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, i == 2 ? 
"*" : ""); } - return size; + return size - start_offset; case SMU_SOCCLK: count = NUM_SOCCLK_DPM_LEVELS; cur_value = metrics.ClockFrequency[CLOCK_SOCCLK]; @@ -607,7 +608,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index c1062e5f0393..677781060246 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1195,15 +1195,16 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1534,7 +1535,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index cb3fea9e8cf3..9e635f733fbf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -34,6 +34,7 @@ #include "amdgpu_fru_eeprom.h" #include <linux/pci.h> #include "smu_cmn.h" +#include "amdgpu_ras.h" #undef MP1_Public #undef smnMP1_FIRMWARE_FLAGS @@ -58,7 +59,7 @@ #define NUM_JPEG_RINGS_FW 10 #define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \ - (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4) + (ARRAY_SIZE(gpu_metrics->jpeg_busy) / 4) const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = { SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION), @@ -81,6 +82,7 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_HROM_EN_BIT, FEATURE_HROM_EN), }; const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = { @@ -139,6 +141,15 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0), MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1), + MSG_MAP(GetRASTableVersion, PPSMC_MSG_GetRasTableVersion, 0), + MSG_MAP(GetBadPageCount, PPSMC_MSG_GetBadPageCount, 0), + MSG_MAP(GetBadPageMcaAddr, PPSMC_MSG_GetBadPageMcaAddress, 0), + MSG_MAP(SetTimestamp, PPSMC_MSG_SetTimestamp, 0), + MSG_MAP(GetTimestamp, PPSMC_MSG_GetTimestamp, 0), + MSG_MAP(GetBadPageIpid, PPSMC_MSG_GetBadPageIpIdLoHi, 0), + MSG_MAP(EraseRasTable, PPSMC_MSG_EraseRasTable, 0), + MSG_MAP(SetFastPptLimit, PPSMC_MSG_SetFastPptLimit, 1), + MSG_MAP(GetFastPptLimit, PPSMC_MSG_GetFastPptLimit, 1), }; int smu_v13_0_12_tables_init(struct smu_context *smu) @@ -345,6 +356,12 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) if 
(smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS))) pptable->MaxNodePowerLimit = SMUQ10_ROUND(static_metrics->MaxNodePowerLimit); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) && + static_metrics->PPT1Max) { + pptable->PPT1Max = static_metrics->PPT1Max; + pptable->PPT1Min = static_metrics->PPT1Min; + pptable->PPT1Default = static_metrics->PPT1Default; + } smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -449,7 +466,7 @@ static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu) return ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + amdgpu_hdp_invalidate(smu->adev, NULL); smu_table_cache_update_time(sys_table, jiffies); memcpy(sys_table->cache.buffer, table->cpu_addr, smu_v13_0_12_get_system_metrics_size()); @@ -719,15 +736,14 @@ static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu, ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) { const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; - struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct smu_v13_0_6_partition_metrics *xcp_metrics; struct amdgpu_device *adev = smu->adev; MetricsTable_t *metrics; int inst, j, k, idx; u32 inst_mask; metrics = (MetricsTable_t *)smu_metrics; - xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table; - smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table; amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); idx = 0; for_each_inst(k, inst_mask) { @@ -772,22 +788,17 @@ ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp return sizeof(*xcp_metrics); } -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics) +void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, + void *smu_metrics, + struct smu_v13_0_6_gpu_metrics *gpu_metrics) { - struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_8 *gpu_metrics = - (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; - int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; + int ret = 0, xcc_id, inst, i, j; u8 num_jpeg_rings_gpu_metrics; MetricsTable_t *metrics; - struct amdgpu_xcp *xcp; - u32 inst_mask; metrics = (MetricsTable_t *)smu_metrics; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); - gpu_metrics->temperature_hotspot = SMUQ10_ROUND(metrics->MaxSocketTemperature); /* Individual HBM stack temperature is not reported */ @@ -877,60 +888,186 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void gpu_metrics->xgmi_link_status[j] = ret; } - gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; - num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics); - for_each_xcp(adev->xcp_mgr, xcp, i) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - /* Both JPEG and VCN has same instances */ - inst = GET_INST(VCN, k); - - for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { - gpu_metrics->xcp_stats[i].jpeg_busy - [(idx * num_jpeg_rings_gpu_metrics) + j] = - SMUQ10_ROUND(metrics->JpegBusy - [(inst * NUM_JPEG_RINGS_FW) + j]); - } - gpu_metrics->xcp_stats[i].vcn_busy[idx] = - SMUQ10_ROUND(metrics->VcnBusy[inst]); - idx++; + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + inst = GET_INST(VCN, i); + + for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { + gpu_metrics->jpeg_busy[(i * num_jpeg_rings_gpu_metrics) + + j] = + 
SMUQ10_ROUND( + metrics->JpegBusy[(inst * + NUM_JPEG_RINGS_FW) + + j]); } + gpu_metrics->vcn_busy[i] = SMUQ10_ROUND(metrics->VcnBusy[inst]); + } - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - inst = GET_INST(GC, k); - gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = - SMUQ10_ROUND(metrics->GfxBusy[inst]); - gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = - SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { - gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); - } - idx++; - } + for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) { + inst = GET_INST(GC, i); + gpu_metrics->gfx_busy_inst[i] = + SMUQ10_ROUND(metrics->GfxBusy[inst]); + gpu_metrics->gfx_busy_acc[i] = + SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, + SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics + ->gfx_below_host_limit_ppt_acc[i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics + ->gfx_below_host_limit_thm_acc[i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->gfx_low_utilization_acc[i] = SMUQ10_ROUND( + metrics->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->gfx_below_host_limit_total_acc + [i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitTotalAcc[inst]); + }; } gpu_metrics->xgmi_link_width = metrics->XgmiWidth; gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate; gpu_metrics->firmware_timestamp = metrics->Timestamp; - - *table = (void *)gpu_metrics; - - return sizeof(*gpu_metrics); } const struct smu_temp_funcs smu_v13_0_12_temp_funcs = { .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported, .get_temp_metrics = smu_v13_0_12_get_temp_metrics, }; + +static int smu_v13_0_12_get_ras_table_version(struct amdgpu_device *adev, + uint32_t *table_version) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetRASTableVersion, 0, table_version); +} + +static int smu_v13_0_12_get_badpage_count(struct amdgpu_device *adev, uint32_t *count, + uint32_t timeout) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint64_t end, now; + int ret = 0; + + now = (uint64_t)ktime_to_ms(ktime_get()); + end = now + timeout; + do { + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageCount, 0, count); + /* eeprom is not ready */ + if (ret != -EBUSY) + return ret; + mdelay(10); + now = (uint64_t)ktime_to_ms(ktime_get()); + } while (now < end); + + dev_err(adev->dev, + "smu get bad page count timeout!\n"); + return ret; +} + +static int smu_v13_0_12_set_timestamp(struct amdgpu_device *adev, uint64_t timestamp) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetTimestamp, (uint32_t)timestamp, 0); +} + +static int smu_v13_0_12_get_timestamp(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp; + int ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetTimestamp, index, 
&temp); + if (!ret) + *timestamp = temp; + + return ret; +} + +static int smu_v13_0_12_get_badpage_ipid(struct amdgpu_device *adev, + uint16_t index, uint64_t *ipid) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp_arg, temp_ipid_lo, temp_ipid_high; + int ret; + + temp_arg = index | (1 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_lo); + if (ret) + return ret; + + temp_arg = index | (2 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_high); + if (!ret) + *ipid = (uint64_t)temp_ipid_high << 32 | temp_ipid_lo; + return ret; +} + +static int smu_v13_0_12_erase_ras_table(struct amdgpu_device *adev, + uint32_t *result) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_EraseRasTable, 0, result); +} + +static int smu_v13_0_12_get_badpage_mca_addr(struct amdgpu_device *adev, + uint16_t index, uint64_t *mca_addr) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp_arg, temp_addr_lo, temp_addr_high; + int ret; + + temp_arg = index | (1 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_lo); + if (ret) + return ret; + + temp_arg = index | (2 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_high); + if (!ret) + *mca_addr = (uint64_t)temp_addr_high << 32 | temp_addr_lo; + return ret; +} + +static const struct ras_eeprom_smu_funcs smu_v13_0_12_eeprom_smu_funcs = { + .get_ras_table_version = smu_v13_0_12_get_ras_table_version, + .get_badpage_count = smu_v13_0_12_get_badpage_count, + .get_badpage_mca_addr = smu_v13_0_12_get_badpage_mca_addr, + .set_timestamp = smu_v13_0_12_set_timestamp, + .get_timestamp = smu_v13_0_12_get_timestamp, + .get_badpage_ipid = smu_v13_0_12_get_badpage_ipid, + .erase_ras_table = smu_v13_0_12_erase_ras_table, +}; + +static void smu_v13_0_12_ras_smu_feature_flags(struct amdgpu_device *adev, uint64_t *flags) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + if (!flags) + return; + + *flags = 0ULL; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(RAS_EEPROM))) + *flags |= RAS_SMU_FEATURE_BIT__RAS_EEPROM; + +} + +const struct ras_smu_drv smu_v13_0_12_ras_smu_drv = { + .smu_eeprom_funcs = &smu_v13_0_12_eeprom_smu_funcs, + .ras_smu_feature_flags = smu_v13_0_12_ras_smu_feature_flags, +}; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index b081ae3e8f43..6908f9930f16 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -497,11 +497,12 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -565,7 +566,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_4_read_sensor(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index f5db181ef489..4576bf008b22 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -861,11 +861,12 @@ out: static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -928,7 +929,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 285cf7979693..44e1cd821eec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -356,6 +356,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver > 0x04560900) smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); + if (fw_ver >= 0x04560D00) + smu_v13_0_6_cap_set(smu, SMU_CAP(FAST_PPT)); + if (fw_ver >= 0x04560700) { if (fw_ver >= 0x04560900) { smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); @@ -450,7 +453,8 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) ((pgm == 4) && (fw_ver >= 0x4557000))) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); - if ((pgm == 0) && (fw_ver >= 0x00558200)) + if ((pgm == 0 && fw_ver >= 0x00558200) || + (pgm == 7 && fw_ver >= 0x07551400)) smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); } @@ -548,7 +552,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - void *gpu_metrics_table __free(kfree) = NULL; + struct smu_v13_0_6_gpu_metrics *gpu_metrics; void *driver_pptable __free(kfree) = NULL; void *metrics_table __free(kfree) = NULL; struct amdgpu_device *adev = smu->adev; @@ -578,24 +582,28 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8); - gpu_metrics_table = - kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); - if (!gpu_metrics_table) - return -ENOMEM; - driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL); if (!driver_pptable) return -ENOMEM; + ret = smu_table_cache_init(smu, SMU_TABLE_SMU_METRICS, + sizeof(struct smu_v13_0_6_gpu_metrics), 1); + if (ret) + return ret; + + gpu_metrics = (struct smu_v13_0_6_gpu_metrics + *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer); + + smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9); if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) { ret = smu_v13_0_12_tables_init(smu); - if (ret) + if (ret) { + smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS); return ret; + } } - smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table); smu_table->metrics_table = no_free_ptr(metrics_table); smu_table->driver_pptable = no_free_ptr(driver_pptable); @@ -731,6 +739,7 @@ static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu) { if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) smu_v13_0_12_tables_fini(smu); + smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS); return smu_v13_0_fini_smc_tables(smu); } @@ -765,7 +774,7 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, return ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + 
amdgpu_hdp_invalidate(smu->adev, NULL); memcpy(smu_table->metrics_table, table->cpu_addr, table_size); smu_table->metrics_time = jiffies; @@ -844,12 +853,23 @@ int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu) return ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + amdgpu_hdp_invalidate(smu->adev, NULL); memcpy(smu_table->metrics_table, table->cpu_addr, table_size); return 0; } +static void smu_v13_0_6_update_caps(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) && + !pptable->PPT1Max) + smu_v13_0_6_cap_clear(smu, SMU_CAP(FAST_PPT)); +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; @@ -866,8 +886,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) uint8_t max_width; if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) - return smu_v13_0_12_setup_driver_pptable(smu); + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_12_setup_driver_pptable(smu); + if (ret) + return ret; + goto out; + } /* Store one-time values in driver PPTable */ if (!pptable->Init) { @@ -947,7 +971,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) smu_v13_0_6_fill_static_metrics_table(smu, static_metrics); } } - +out: + smu_v13_0_6_update_caps(smu); return 0; } @@ -1393,7 +1418,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size, return -EINVAL; if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) { - size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk); + size += sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk); for (i = 0; i < clocks.num_levels; i++) size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i, clocks.data[i].clocks_in_khz / @@ -1428,7 +1453,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size, static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, enum smu_clk_type type, char *buf) { - int now, size = 0; + int now, size = 0, start_offset = 0; int ret = 0; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; struct smu_13_0_dpm_table *single_dpm_table; @@ -1437,10 +1462,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, uint32_t min_clk, max_clk; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } dpm_context = smu_dpm->dpm_context; @@ -1512,9 +1538,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.uclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "mclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "mclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_SOCCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now); @@ -1526,9 +1556,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.soc_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "socclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "socclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_FCLK: ret 
= smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK, &now); @@ -1540,9 +1574,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.fclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "fclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "fclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_VCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK, &now); @@ -1554,9 +1592,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.vclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "vclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "vclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_DCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK, &now); @@ -1568,14 +1610,18 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.dclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "dclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "dclk"); + if (ret < 0) + return ret; + size += ret; + break; default: break; } - return size; + return size - start_offset; } static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max, @@ -1845,7 +1891,7 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu, if (current_power_limit) *current_power_limit = power_limit; if (default_power_limit) - *default_power_limit = power_limit; + *default_power_limit = pptable->MaxSocketPowerLimit; if (max_power_limit) { *max_power_limit = pptable->MaxSocketPowerLimit; @@ -1860,9 +1906,66 @@ static int smu_v13_0_6_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) { + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + int ret; + + if (limit_type == SMU_FAST_PPT_LIMIT) { + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT))) + return -EOPNOTSUPP; + if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) { + dev_err(smu->adev->dev, + "New power limit (%d) should be between min %d max %d\n", + limit, pptable->PPT1Min, pptable->PPT1Max); + return -EINVAL; + } + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit, + limit, NULL); + if (ret) + dev_err(smu->adev->dev, "Set fast PPT limit failed!\n"); + return ret; + } + return smu_v13_0_set_power_limit(smu, limit_type, limit); } +static int smu_v13_0_6_get_ppt_limit(struct smu_context *smu, + uint32_t *ppt_limit, + enum smu_ppt_limit_type type, + enum smu_ppt_limit_level level) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + int ret = 0; + + if (type == SMU_FAST_PPT_LIMIT) { + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT))) + return -EOPNOTSUPP; + switch (level) { + case SMU_PPT_LIMIT_MAX: + *ppt_limit = pptable->PPT1Max; + break; + case SMU_PPT_LIMIT_CURRENT: + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit, ppt_limit); + if (ret) + dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); + break; + case SMU_PPT_LIMIT_DEFAULT: + *ppt_limit = pptable->PPT1Default; + break; + case SMU_PPT_LIMIT_MIN: + *ppt_limit = pptable->PPT1Min; + break; + default: + return -EOPNOTSUPP; + } + return ret; + } 
+ return -EOPNOTSUPP; +} + static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -2383,7 +2486,7 @@ static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu, memcpy(table->cpu_addr, table_data, table_size); /* Flush hdp cache */ - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction, NULL); @@ -2627,7 +2730,7 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, { const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; int version = smu_v13_0_6_get_metrics_version(smu); - struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct smu_v13_0_6_partition_metrics *xcp_metrics; MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; struct amdgpu_device *adev = smu->adev; int ret, inst, i, j, k, idx; @@ -2647,8 +2750,8 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, if (i == adev->xcp_mgr->num_xcps) return -EINVAL; - xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table; - smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table; + smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1); metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); if (!metrics_v0) @@ -2740,18 +2843,16 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_8 *gpu_metrics = - (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; + struct smu_table *tables = smu_table->tables; + struct smu_v13_0_6_gpu_metrics *gpu_metrics; int version = smu_v13_0_6_get_metrics_version(smu); MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; - int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; + int ret = 0, xcc_id, inst, i, j; MetricsTableV1_t *metrics_v1; MetricsTableV2_t *metrics_v2; - struct amdgpu_xcp *xcp; u16 link_width_level; u8 num_jpeg_rings; - u32 inst_mask; bool per_inst; metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); @@ -2759,16 +2860,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table if (ret) return ret; - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == - IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) - return smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); + metrics_v2 = (MetricsTableV2_t *)metrics_v0; + gpu_metrics = (struct smu_v13_0_6_gpu_metrics + *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer); + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0, + gpu_metrics); + goto fill; + } metrics_v1 = (MetricsTableV1_t *)metrics_v0; metrics_v2 = (MetricsTableV2_t *)metrics_v0; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); - gpu_metrics->temperature_hotspot = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)); /* Individual HBM stack temperature is not reported */ @@ -2889,55 +2994,49 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->xgmi_link_status[j] = ret; } - gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; - per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; - 
for_each_xcp(adev->xcp_mgr, xcp, i) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - /* Both JPEG and VCN has same instances */ - inst = GET_INST(VCN, k); - - for (j = 0; j < num_jpeg_rings; ++j) { - gpu_metrics->xcp_stats[i].jpeg_busy - [(idx * num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version) - [(inst * num_jpeg_rings) + j]); - } - gpu_metrics->xcp_stats[i].vcn_busy[idx] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); - idx++; - - } + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + inst = GET_INST(JPEG, i); + for (j = 0; j < num_jpeg_rings; ++j) + gpu_metrics->jpeg_busy[(i * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD( + JpegBusy, + version)[(inst * num_jpeg_rings) + j]); + } + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + inst = GET_INST(VCN, i); + gpu_metrics->vcn_busy[i] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + } - if (per_inst) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - inst = GET_INST(GC, k); - gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = - SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); - gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = - SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, - version)[inst]); - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { - gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkLowUtilizationAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]); - } - idx++; + if (per_inst) { + for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) { + inst = GET_INST(GC, i); + gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported( + smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->gfx_below_host_limit_ppt_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitPptAcc + [inst]); + gpu_metrics->gfx_below_host_limit_thm_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitThmAcc + [inst]); + gpu_metrics->gfx_low_utilization_acc + [i] = SMUQ10_ROUND( + metrics_v0 + ->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->gfx_below_host_limit_total_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitTotalAcc + [inst]); } } } @@ -2947,7 +3046,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); - *table = (void *)gpu_metrics; +fill: + *table = tables[SMU_TABLE_SMU_METRICS].cache.buffer; return sizeof(*gpu_metrics); } @@ -3226,6 +3326,24 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) return ret; } +static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t param, uint32_t *read_arg) +{ + int ret; + + switch (msg) { + case SMU_MSG_QueryValidMcaCount: + case SMU_MSG_QueryValidMcaCeCount: + case SMU_MSG_McaBankDumpDW: + case SMU_MSG_McaBankCeDumpDW: + case SMU_MSG_ClearMcaOnRead: + ret = 
smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg); + break; + default: + ret = -EPERM; + } + + return ret; +} static int smu_v13_0_6_post_init(struct smu_context *smu) { @@ -3863,6 +3981,29 @@ static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu) == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL; } +static int smu_v13_0_6_get_ras_smu_drv(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv) +{ + if (!ras_smu_drv) + return -EINVAL; + + if (amdgpu_sriov_vf(smu->adev)) + return -EOPNOTSUPP; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_HROM_EN_BIT)) + smu_v13_0_6_cap_set(smu, SMU_CAP(RAS_EEPROM)); + + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 12): + *ras_smu_drv = &smu_v13_0_12_ras_smu_drv; + break; + default: + *ras_smu_drv = NULL; + break; + } + + return 0; +} + static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -3894,6 +4035,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_enabled_mask = smu_v13_0_6_get_enabled_mask, .feature_is_enabled = smu_cmn_feature_is_enabled, .set_power_limit = smu_v13_0_6_set_power_limit, + .get_ppt_limit = smu_v13_0_6_get_ppt_limit, .set_xgmi_pstate = smu_v13_0_set_xgmi_pstate, .register_irq_handler = smu_v13_0_6_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, @@ -3921,6 +4063,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .reset_sdma = smu_v13_0_6_reset_sdma, .dpm_reset_vcn = smu_v13_0_6_reset_vcn, .post_init = smu_v13_0_6_post_init, + .ras_send_msg = smu_v13_0_6_ras_send_msg, + .get_ras_smu_drv = smu_v13_0_6_get_ras_smu_drv, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index 7ef5f3e66c27..6cbdd7c5ded9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -50,6 +50,9 @@ struct PPTable_t { uint32_t MinLclkDpmRange; uint64_t PublicSerialNumber_AID; uint32_t MaxNodePowerLimit; + uint32_t PPT1Max; + uint32_t PPT1Min; + uint32_t PPT1Default; bool Init; }; @@ -72,9 +75,18 @@ enum smu_v13_0_6_caps { SMU_CAP(PLDM_VERSION), SMU_CAP(TEMP_METRICS), SMU_CAP(NPM_METRICS), + SMU_CAP(RAS_EEPROM), + SMU_CAP(FAST_PPT), SMU_CAP(ALL), }; +#define SMU_13_0_6_NUM_XGMI_LINKS 8 +#define SMU_13_0_6_MAX_GFX_CLKS 8 +#define SMU_13_0_6_MAX_CLKS 4 +#define SMU_13_0_6_MAX_XCC 8 +#define SMU_13_0_6_MAX_VCN 4 +#define SMU_13_0_6_MAX_JPEG 40 + extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); @@ -87,7 +99,6 @@ size_t smu_v13_0_12_get_system_metrics_size(void); int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value); -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics); ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics); @@ -99,4 +110,156 @@ int smu_v13_0_12_get_npm_data(struct smu_context *smu, extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; extern const struct smu_temp_funcs 
smu_v13_0_12_temp_funcs; +extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv; + +#if defined(SWSMU_CODE_LAYER_L2) +#include "smu_cmn.h" + +/* SMUv 13.0.6 GPU metrics*/ +#define SMU_13_0_6_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_hotspot); \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_mem); \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_vrsoc); \ + SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \ + SMU_MTYPE(U16), curr_socket_power); \ + SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U16), average_gfx_activity); \ + SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U16), average_umc_activity); \ + SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \ + SMU_MTYPE(U64), mem_max_bandwidth); \ + SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), energy_accumulator); \ + SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \ + SMU_MTYPE(U64), system_clock_counter); \ + SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), accumulation_counter); \ + SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), prochot_residency_acc); \ + SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), ppt_residency_acc); \ + SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), socket_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), vr_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), hbm_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), gfxclk_lock_status); \ + SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), pcie_link_width); \ + SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \ + SMU_MTYPE(U16), pcie_link_speed); \ + SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), xgmi_link_width); \ + SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \ + SMU_MTYPE(U16), xgmi_link_speed); \ + SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_activity_acc); \ + SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), mem_activity_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U64), pcie_bandwidth_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \ + SMU_MTYPE(U64), pcie_bandwidth_inst); \ + SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_replay_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_replay_rover_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), pcie_nak_sent_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), pcie_nak_rcvd_count_acc); \ + SMU_ARRAY(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \ + SMU_MTYPE(U64), xgmi_read_data_acc, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_ARRAY(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \ + 
SMU_MTYPE(U64), xgmi_write_data_acc, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), xgmi_link_status, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \ + SMU_MTYPE(U64), firmware_timestamp); \ + SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_GFX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_uclk); \ + SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \ + SMU_MUNIT(NONE), SMU_MTYPE(U32), \ + pcie_lc_perf_other_end_recovery); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + jpeg_busy, SMU_13_0_6_MAX_JPEG); \ + SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + vcn_busy, SMU_13_0_6_MAX_VCN); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \ + gfx_busy_acc, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_low_utilization_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ + SMU_13_0_6_MAX_XCC); + +DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS); +void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, + void *smu_metrics, + struct smu_v13_0_6_gpu_metrics *gpu_metrics); + +#define SMU_13_0_6_PARTITION_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \ + SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_uclk); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + jpeg_busy, SMU_13_0_6_MAX_JPEG); \ + SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + vcn_busy, SMU_13_0_6_MAX_VCN); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \ + gfx_busy_acc, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), 
SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_low_utilization_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ + SMU_13_0_6_MAX_XCC); + +DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics, + SMU_13_0_6_PARTITION_METRICS_FIELDS); + +#endif /* SWSMU_CODE_LAYER_L2 */ + #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c96fa5e49ed6..a3fc35b9011e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1184,15 +1184,16 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1523,7 +1524,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 73b4506ef5a8..5d7e671fa3c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1041,12 +1041,13 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu, static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; uint32_t clk_limit = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -1111,7 +1112,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } static int yellow_carp_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index fe00c84b1cc6..b1bd946d8e30 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1132,11 +1132,12 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, ret = 0, size = 0; + int i, idx, ret = 0, size = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -1202,7 +1203,7 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, diff --git 
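The repeated start_offset change in the print_clk_levels() implementations (smu_v13_0_7, yellow_carp, smu_v14_0_0, and smu_v14_0_2 below) follows one pattern: smu_cmn_get_sysfs_buf() may advance size past bytes already present in the sysfs buffer, so the function should report only the bytes it emitted itself. A minimal sketch of the pattern; the emitted strings are placeholders:

#include <linux/sysfs.h>

/* Sketch of the return-value fix, not the full driver function. */
static int print_clk_levels_sketch(char *buf, int size)
{
        int start_offset;

        /* smu_cmn_get_sysfs_buf() may already have accounted for earlier
         * output, so remember where this function's own output starts. */
        start_offset = size;

        size += sysfs_emit_at(buf, size, "0: 500Mhz *\n");
        size += sysfs_emit_at(buf, size, "1: 1200Mhz\n");

        /* Return only what was written here, not the accumulated total. */
        return size - start_offset;
}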
a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 086501cc5213..2cea688c604f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1056,15 +1056,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, struct smu_14_0_dpm_table *single_dpm_table; struct smu_14_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1374,7 +1375,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index a8961a8f5c42..4040ff926544 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -164,9 +164,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, msg_index, param, message); break; case SMU_RESP_BUSY_OTHER: - dev_err_ratelimited(adev->dev, - "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", - msg_index, param, message); + /* It is normal for SMU_MSG_GetBadPageCount to return busy + * so don't print error at this case. + */ + if (msg != SMU_MSG_GetBadPageCount) + dev_err_ratelimited(adev->dev, + "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", + msg_index, param, message); break; case SMU_RESP_DEBUG_END: dev_err_ratelimited(adev->dev, @@ -980,7 +984,7 @@ int smu_cmn_update_table(struct smu_context *smu, * Flush hdp cache: to guard the content seen by * GPU is consitent with CPU. */ - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); } ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ? 
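The rename from amdgpu_asic_flush_hdp()/amdgpu_asic_invalidate_hdp() to amdgpu_hdp_flush()/amdgpu_hdp_invalidate() in smu_cmn_update_table() keeps the same ordering contract around the table transfer. A hedged sketch of that ordering, using only calls visible in these hunks; the message argument is left generic because the real function selects it from drv2smu:

/* Minimal sketch, assuming msg is the appropriate table-transfer message
 * for the chosen direction; error paths abbreviated. */
static int update_table_order_sketch(struct smu_context *smu,
                                     struct amdgpu_device *adev,
                                     struct smu_table *table, void *table_data,
                                     size_t table_size, bool drv2smu,
                                     enum smu_message_type msg, int table_id)
{
        int ret;

        if (drv2smu) {
                /* CPU wrote the staging buffer: flush HDP so the GPU sees it. */
                memcpy(table->cpu_addr, table_data, table_size);
                amdgpu_hdp_flush(adev, NULL);
        }

        ret = smu_cmn_send_smc_msg_with_param(smu, msg, table_id, NULL);
        if (ret)
                return ret;

        if (!drv2smu) {
                /* SMU wrote the staging buffer: drop stale HDP contents
                 * before the CPU reads it back. */
                amdgpu_hdp_invalidate(adev, NULL);
                memcpy(table_data, table->cpu_addr, table_size);
        }

        return 0;
}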
@@ -992,7 +996,7 @@ int smu_cmn_update_table(struct smu_context *smu, return ret; if (!drv2smu) { - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table_data, table->cpu_addr, table_size); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 0ae91c8b6d72..8d7c4814c68f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -202,5 +202,72 @@ void smu_cmn_get_backend_workload_mask(struct smu_context *smu, u32 workload_mask, u32 *backend_workload_mask); +/*SMU gpu metrics */ + +/* Attribute ID mapping */ +#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X +/* Type ID mapping */ +#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X +/* Unit ID mapping */ +#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X + +/* Map TYPEID to C type */ +#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID + +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8 u8 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8 s8 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16 u16 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16 s16 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32 u32 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32 s32 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64 u64 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64 s64 + +/* struct members */ +#define SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME) \ + u64 NAME##_ftype; \ + SMU_CTYPE(TYPEID) NAME + +#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \ + u64 NAME##_ftype; \ + SMU_CTYPE(TYPEID) NAME[SIZE] + +/* Init functions for scalar/array fields - init to 0xFFs */ +#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME) \ + do { \ + obj->NAME##_ftype = \ + AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1); \ + obj->NAME = (SMU_CTYPE(TYPEID)) ~0; \ + count++; \ + } while (0) + +#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \ + do { \ + obj->NAME##_ftype = \ + AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \ + memset(obj->NAME, 0xFF, sizeof(obj->NAME)); \ + count++; \ + } while (0) + +/* Declare Metrics Class and Template object */ +#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST) \ + struct __packed CLASSNAME { \ + struct metrics_table_header header; \ + int attr_count; \ + SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \ + }; \ + static inline void CLASSNAME##_init(struct CLASSNAME *obj, \ + uint8_t frev, uint8_t crev) \ + { \ + int count = 0; \ + memset(obj, 0xFF, sizeof(*obj)); \ + obj->header.format_revision = frev; \ + obj->header.content_revision = crev; \ + obj->header.structure_size = sizeof(*obj); \ + SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR, \ + SMU_METRICS_INIT_ARRAY) \ + obj->attr_count = count; \ + } + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index c09ecf1a68a0..34f6b4b1c3ba 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -100,6 +100,7 @@ #define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu) #define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable) #define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range) +#define smu_get_ras_smu_drv(smu, ras_smu_drv) smu_ppt_funcs(get_ras_smu_drv, -EOPNOTSUPP, smu, ras_smu_drv) #endif #endif diff --git a/drivers/gpu/drm/amd/ras/Makefile b/drivers/gpu/drm/amd/ras/Makefile new file mode 100644 index 
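DECLARE_SMU_METRICS_CLASS() above is an X-macro: the same field list is expanded once with SMU_METRICS_SCALAR/SMU_METRICS_ARRAY to lay out the packed struct, and once with the SMU_METRICS_INIT_* variants to generate the <name>_init() helper that seeds every field with all-ones and counts attributes. A toy, self-contained illustration of the pattern; the names here are hypothetical, not the SMU macros:

#include <stdint.h>
#include <string.h>

/* One field list, two expansions: struct members and an init routine. */
#define TOY_FIELDS(SCALAR, ARRAY)        \
        SCALAR(uint16_t, temperature);   \
        ARRAY(uint32_t, busy, 4)

#define TOY_MEMBER_SCALAR(type, name)       type name
#define TOY_MEMBER_ARRAY(type, name, size)  type name[size]

#define TOY_INIT_SCALAR(type, name)         obj->name = (type)~0
#define TOY_INIT_ARRAY(type, name, size)    memset(obj->name, 0xFF, sizeof(obj->name))

struct toy_metrics {
        TOY_FIELDS(TOY_MEMBER_SCALAR, TOY_MEMBER_ARRAY);
};

static inline void toy_metrics_init(struct toy_metrics *obj)
{
        /* Like the SMU helper, unreported values start out as all-ones. */
        TOY_FIELDS(TOY_INIT_SCALAR, TOY_INIT_ARRAY);
}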
000000000000..bbdaba811d34 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/Makefile @@ -0,0 +1,34 @@ +# +# Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +ifeq ($(AMD_GPU_RAS_MGR),) + AMD_GPU_RAS_MGR := ras_mgr +endif + +subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/rascore +subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/$(AMD_GPU_RAS_MGR) + +RAS_LIBS = $(AMD_GPU_RAS_MGR) rascore + +AMD_RAS = $(addsuffix /Makefile, $(addprefix $(AMD_GPU_RAS_FULL_PATH)/,$(RAS_LIBS))) + +include $(AMD_RAS) + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/Makefile b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile new file mode 100644 index 000000000000..5e5a2cfa4068 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile @@ -0,0 +1,33 @@ +# Copyright 2025 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+ +RAS_MGR_FILES = amdgpu_ras_sys.o \ + amdgpu_ras_mgr.o \ + amdgpu_ras_eeprom_i2c.o \ + amdgpu_ras_mp1_v13_0.o \ + amdgpu_ras_cmd.o \ + amdgpu_ras_process.o \ + amdgpu_ras_nbio_v7_9.o + + +RAS_MGR = $(addprefix $(AMD_GPU_RAS_PATH)/ras_mgr/, $(RAS_MGR_FILES)) + +AMD_GPU_RAS_FILES += $(RAS_MGR) + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c new file mode 100644 index 000000000000..78419b7f7729 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/pci.h> +#include "amdgpu.h" +#include "amdgpu_ras.h" +#include "ras_sys.h" +#include "amdgpu_ras_cmd.h" +#include "amdgpu_ras_mgr.h" + +/* inject address is 52 bits */ +#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) + +#define AMDGPU_RAS_TYPE_RASCORE 0x1 +#define AMDGPU_RAS_TYPE_AMDGPU 0x2 +#define AMDGPU_RAS_TYPE_VF 0x3 + +static int amdgpu_ras_trigger_error_prepare(struct ras_core_context *ras_core, + struct ras_cmd_inject_error_req *block_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + + if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) { + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + RAS_DEV_WARN(adev, "Failed to disallow df cstate"); + + ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW); + if (ret && (ret != -EOPNOTSUPP)) + RAS_DEV_WARN(adev, "Failed to disallow XGMI power down"); + } + + return 0; +} + +static int amdgpu_ras_trigger_error_end(struct ras_core_context *ras_core, + struct ras_cmd_inject_error_req *block_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + + if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) { + if (amdgpu_ras_intr_triggered()) + return 0; + + ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT); + if (ret && (ret != -EOPNOTSUPP)) + RAS_DEV_WARN(adev, "Failed to allow XGMI power down"); + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + RAS_DEV_WARN(adev, "Failed to allow df cstate"); + } + + return 0; +} + +static uint64_t local_addr_to_xgmi_global_addr(struct ras_core_context *ras_core, + uint64_t addr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi; + + return (addr + 
xgmi->physical_node_id * xgmi->node_segment_size); +} + +static int amdgpu_ras_inject_error(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_cmd_inject_error_req *req = + (struct ras_cmd_inject_error_req *)cmd->input_buff_raw; + int ret = RAS_CMD__ERROR_GENERIC; + + if (req->block_id == RAS_BLOCK_ID__UMC) { + if (amdgpu_ras_mgr_check_retired_addr(adev, req->address)) { + RAS_DEV_WARN(ras_core->dev, + "RAS WARN: inject: 0x%llx has already been marked as bad!\n", + req->address); + return RAS_CMD__ERROR_ACCESS_DENIED; + } + + if ((req->address >= adev->gmc.mc_vram_size && + adev->gmc.mc_vram_size) || + (req->address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + RAS_DEV_WARN(adev, "RAS WARN: input address 0x%llx is invalid.", + req->address); + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + } + + /* Calculate XGMI relative offset */ + if (adev->gmc.xgmi.num_physical_nodes > 1 && + req->block_id != RAS_BLOCK_ID__GFX) { + req->address = local_addr_to_xgmi_global_addr(ras_core, req->address); + } + } + + amdgpu_ras_trigger_error_prepare(ras_core, req); + ret = rascore_handle_cmd(ras_core, cmd, data); + amdgpu_ras_trigger_error_end(ras_core, req); + if (ret) { + RAS_DEV_ERR(adev, "ras inject block %u failed %d\n", req->block_id, ret); + ret = RAS_CMD__ERROR_ACCESS_DENIED; + } + + + return ret; +} + +static int amdgpu_ras_get_ras_safe_fb_addr_ranges(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_cmd_dev_handle *input_data = + (struct ras_cmd_dev_handle *)cmd->input_buff_raw; + struct ras_cmd_ras_safe_fb_address_ranges_rsp *ranges = + (struct ras_cmd_ras_safe_fb_address_ranges_rsp *)cmd->output_buff_raw; + struct amdgpu_mem_partition_info *mem_ranges; + uint32_t i = 0; + + if (cmd->input_size != sizeof(*input_data)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + mem_ranges = adev->gmc.mem_partitions; + for (i = 0; i < adev->gmc.num_mem_partitions; i++) { + ranges->range[i].start = mem_ranges[i].range.fpfn << AMDGPU_GPU_PAGE_SHIFT; + ranges->range[i].size = mem_ranges[i].size; + ranges->range[i].idx = i; + } + + ranges->num_ranges = adev->gmc.num_mem_partitions; + + ranges->version = 0; + cmd->output_size = sizeof(struct ras_cmd_ras_safe_fb_address_ranges_rsp); + + return RAS_CMD__SUCCESS; +} + +static int ras_translate_fb_address(struct ras_core_context *ras_core, + enum ras_fb_addr_type src_type, + enum ras_fb_addr_type dest_type, + union ras_translate_fb_address *src_addr, + union ras_translate_fb_address *dest_addr) +{ + uint64_t soc_phy_addr; + int ret = RAS_CMD__SUCCESS; + + /* Does not need to be queued as event as this is a SW translation */ + switch (src_type) { + case RAS_FB_ADDR_SOC_PHY: + soc_phy_addr = src_addr->soc_phy_addr; + break; + case RAS_FB_ADDR_BANK: + ret = ras_cmd_translate_bank_to_soc_pa(ras_core, + src_addr->bank_addr, &soc_phy_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + break; + default: + return RAS_CMD__ERROR_INVALID_CMD; + } + + switch (dest_type) { + case RAS_FB_ADDR_SOC_PHY: + dest_addr->soc_phy_addr = soc_phy_addr; + break; + case RAS_FB_ADDR_BANK: + ret = ras_cmd_translate_soc_pa_to_bank(ras_core, + soc_phy_addr, &dest_addr->bank_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + break; + default: + return RAS_CMD__ERROR_INVALID_CMD; + } + + return ret; +} + +static int amdgpu_ras_translate_fb_address(struct ras_core_context *ras_core, + struct 
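amdgpu_ras_inject_error() checks the UMC inject address against a 52-bit limit and, on a multi-node XGMI hive, turns the node-local VRAM offset into a hive-global address by adding physical_node_id * node_segment_size. A worked sketch of that arithmetic with made-up values; only the constants shown in this file are assumed:

#include <stdint.h>

#define RAS_UMC_INJECT_ADDR_LIMIT_SKETCH (1ULL << 52)

/* Hypothetical: node 2 of a hive whose nodes each own a 64 GiB segment. */
static uint64_t local_to_global_sketch(uint64_t local_addr)
{
        const uint64_t physical_node_id = 2;
        const uint64_t node_segment_size = 64ULL << 30;  /* 64 GiB */

        if (local_addr >= RAS_UMC_INJECT_ADDR_LIMIT_SKETCH)
                return UINT64_MAX;  /* reject, as the driver does */

        /* 0x1000 on node 2 becomes 0x2000001000 in hive-global space. */
        return local_addr + physical_node_id * node_segment_size;
}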
ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_translate_fb_address_req *req_buff = + (struct ras_cmd_translate_fb_address_req *)cmd->input_buff_raw; + struct ras_cmd_translate_fb_address_rsp *rsp_buff = + (struct ras_cmd_translate_fb_address_rsp *)cmd->output_buff_raw; + int ret = RAS_CMD__ERROR_GENERIC; + + if (cmd->input_size != sizeof(struct ras_cmd_translate_fb_address_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if ((req_buff->src_addr_type >= RAS_FB_ADDR_UNKNOWN) || + (req_buff->dest_addr_type >= RAS_FB_ADDR_UNKNOWN)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + ret = ras_translate_fb_address(ras_core, req_buff->src_addr_type, + req_buff->dest_addr_type, &req_buff->trans_addr, &rsp_buff->trans_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + rsp_buff->version = 0; + cmd->output_size = sizeof(struct ras_cmd_translate_fb_address_rsp); + + return RAS_CMD__SUCCESS; +} + +static struct ras_cmd_func_map amdgpu_ras_cmd_maps[] = { + {RAS_CMD__INJECT_ERROR, amdgpu_ras_inject_error}, + {RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, amdgpu_ras_get_ras_safe_fb_addr_ranges}, + {RAS_CMD__TRANSLATE_FB_ADDRESS, amdgpu_ras_translate_fb_address}, +}; + +int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_func_map *ras_cmd = NULL; + int i, res; + + for (i = 0; i < ARRAY_SIZE(amdgpu_ras_cmd_maps); i++) { + if (cmd->cmd_id == amdgpu_ras_cmd_maps[i].cmd_id) { + ras_cmd = &amdgpu_ras_cmd_maps[i]; + break; + } + } + + if (ras_cmd) + res = ras_cmd->func(ras_core, cmd, NULL); + else + res = RAS_CMD__ERROR_UKNOWN_CMD; + + return res; +} + +int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd) +{ + struct ras_core_context *cmd_core = ras_core; + int timeout = 60; + int res; + + cmd->cmd_res = RAS_CMD__ERROR_INVALID_CMD; + cmd->output_size = 0; + + if (!ras_core_is_enabled(cmd_core)) + return RAS_CMD__ERROR_ACCESS_DENIED; + + while (ras_core_gpu_in_reset(cmd_core)) { + msleep(1000); + if (!timeout--) + return RAS_CMD__ERROR_TIMEOUT; + } + + res = amdgpu_ras_handle_cmd(cmd_core, cmd, NULL); + if (res == RAS_CMD__ERROR_UKNOWN_CMD) + res = rascore_handle_cmd(cmd_core, cmd, NULL); + + cmd->cmd_res = res; + + if (cmd->output_size > cmd->output_buf_size) { + RAS_DEV_ERR(cmd_core->dev, + "Output size 0x%x exceeds output buffer size 0x%x!\n", + cmd->output_size, cmd->output_buf_size); + return RAS_CMD__SUCCESS_EXEED_BUFFER; + } + + return RAS_CMD__SUCCESS; +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h new file mode 100644 index 000000000000..5973b156cc85 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMDGPU_RAS_CMD_H__ +#define __AMDGPU_RAS_CMD_H__ +#include "ras.h" + +enum amdgpu_ras_cmd_id { + RAS_CMD__AMDGPU_BEGIN = RAS_CMD_ID_AMDGPU_START, + RAS_CMD__TRANSLATE_MEMORY_FD, + RAS_CMD__AMDGPU_SUPPORTED_MAX = RAS_CMD_ID_AMDGPU_END, +}; + +struct ras_cmd_translate_memory_fd_req { + struct ras_cmd_dev_handle dev; + uint32_t type; + uint32_t fd; + uint64_t address; + uint32_t reserved[4]; +}; + +struct ras_cmd_translate_memory_fd_rsp { + uint32_t version; + uint32_t padding; + uint64_t start; + uint64_t size; + uint32_t reserved[2]; +}; + +int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data); +int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd); + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c new file mode 100644 index 000000000000..3ed3ff42b7e1 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "amdgpu.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_ras_eeprom.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_eeprom_i2c.h" +#include "ras_eeprom.h" + +/* These are memory addresses as would be seen by one or more EEPROM + * chips strung on the I2C bus, usually by manipulating pins 1-3 of a + * set of EEPROM devices. They form a continuous memory space. + * + * The I2C device address includes the device type identifier, 1010b, + * which is a reserved value and indicates that this is an I2C EEPROM + * device. It also includes the top 3 bits of the 19 bit EEPROM memory + * address, namely bits 18, 17, and 16. This makes up the 7 bit + * address sent on the I2C bus with bit 0 being the direction bit, + * which is not represented here, and sent by the hardware directly. 
+ * + * For instance, + * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0. + * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h. + * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h. + * Depending on the size of the I2C EEPROM device(s), bits 18:16 may + * address memory in a device or a device on the I2C bus, depending on + * the status of pins 1-3. See top of amdgpu_eeprom.c. + * + * The RAS table lives either at address 0 or address 40000h of EEPROM. + */ +#define EEPROM_I2C_MADDR_0 0x0 +#define EEPROM_I2C_MADDR_4 0x40000 + +#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF)) +#define to_amdgpu_ras(x) (container_of(x, struct amdgpu_ras, eeprom_control)) + +#define EEPROM_PAGE_BITS 8 +#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS) +#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1) + +#define EEPROM_OFFSET_SIZE 2 + +static int ras_eeprom_i2c_config(struct ras_core_context *ras_core) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + u8 i2c_addr; + + if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) { + /* The address given by VBIOS is an 8-bit, wire-format + * address, i.e. the most significant byte. + * + * Normalize it to a 19-bit EEPROM address. Remove the + * device type identifier and make it a 7-bit address; + * then make it a 19-bit EEPROM address. See top of + * amdgpu_eeprom.c. + */ + i2c_addr = (i2c_addr & 0x0F) >> 1; + control->i2c_address = ((u32) i2c_addr) << 16; + return 0; + } + + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 5): + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 10): + case IP_VERSION(13, 0, 12): + case IP_VERSION(13, 0, 14): + control->i2c_address = EEPROM_I2C_MADDR_4; + return 0; + default: + return -ENODATA; + } + return -ENODATA; +} + +static int ras_eeprom_i2c_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + struct i2c_adapter *i2c_adap = ras_core->ras_eeprom.i2c_adapter; + u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE]; + struct i2c_msg msgs[] = { + { + .flags = 0, + .len = EEPROM_OFFSET_SIZE, + .buf = eeprom_offset_buf, + }, + { + .flags = read ? I2C_M_RD : 0, + }, + }; + const u8 *p = eeprom_buf; + int r; + u16 len; + + for (r = 0; buf_size > 0; + buf_size -= len, eeprom_addr += len, eeprom_buf += len) { + /* Set the EEPROM address we want to write to/read from. + */ + msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr); + msgs[1].addr = msgs[0].addr; + msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff; + msgs[0].buf[1] = eeprom_addr & 0xff; + + if (!read) { + /* Write the maximum amount of data, without + * crossing the device's page boundary, as per + * its spec. Partial page writes are allowed, + * starting at any location within the page, + * so long as the page boundary isn't crossed + * over (actually the page pointer rolls + * over). + * + * As per the AT24CM02 EEPROM spec, after + * writing into a page, the I2C driver should + * terminate the transfer, i.e. in + * "i2c_transfer()" below, with a STOP + * condition, so that the self-timed write + * cycle begins. This is implied for the + * "i2c_transfer()" abstraction. + */ + len = min(EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK), + buf_size); + } else { + /* Reading from the EEPROM has no limitation + * on the number of bytes read from the EEPROM + * device--they are simply sequenced out. + * Keep in mind that i2c_msg.len is u16 type. 
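Two pieces of arithmetic in amdgpu_ras_eeprom_i2c.c are easy to sanity-check with concrete numbers: the normalization of the 8-bit wire address reported by VBIOS into a 19-bit EEPROM memory address, and the write-length clamp that keeps each transfer inside one 256-byte EEPROM page. A small sketch with illustrative values; names carry a _sketch/_SKETCH suffix to mark them as hypothetical:

#include <stdint.h>

#define PAGE_BITS_SKETCH 8
#define PAGE_SIZE_SKETCH (1U << PAGE_BITS_SKETCH)   /* 256-byte EEPROM page */
#define PAGE_MASK_SKETCH (PAGE_SIZE_SKETCH - 1)

/* VBIOS wire address 0xA8: keep bits 3:1 (100b) -> EEPROM address 0x40000. */
static uint32_t normalize_wire_addr_sketch(uint8_t wire_addr)
{
        uint8_t top_bits = (wire_addr & 0x0F) >> 1;

        return (uint32_t)top_bits << 16;
}

/* A 300-byte write starting at 0x400F0 is split 16 + 256 + 28 so no chunk
 * crosses a page boundary; reads are only limited by the u16 msg length. */
static uint16_t write_chunk_len_sketch(uint32_t eeprom_addr, uint32_t remaining)
{
        uint32_t to_page_end = PAGE_SIZE_SKETCH - (eeprom_addr & PAGE_MASK_SKETCH);

        return (uint16_t)(to_page_end < remaining ? to_page_end : remaining);
}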
+ */ + len = min(U16_MAX, buf_size); + } + msgs[1].len = len; + msgs[1].buf = eeprom_buf; + + + /* This constitutes a START-STOP transaction. + */ + r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs)); + if (r != ARRAY_SIZE(msgs)) + break; + + if (!read) { + /* According to EEPROM specs the length of the + * self-writing cycle, tWR (tW), is 10 ms. + * + * TODO: Use polling on ACK, aka Acknowledge + * Polling, to minimize waiting for the + * internal write cycle to complete, as it is + * usually smaller than tWR (tW). + */ + msleep(10); + } + } + + return r < 0 ? r : eeprom_buf - p; +} + +const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func = { + .eeprom_i2c_xfer = ras_eeprom_i2c_xfer, + .update_eeprom_i2c_config = ras_eeprom_i2c_config, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h new file mode 100644 index 000000000000..3b5878605411 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __AMDGPU_RAS_EEPROM_I2C_H__ +#define __AMDGPU_RAS_EEPROM_I2C_H__ +#include "ras.h" + +extern const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func; +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c new file mode 100644 index 000000000000..afe8135b6258 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_reset.h" +#include "amdgpu_xgmi.h" +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_cmd.h" +#include "amdgpu_ras_process.h" +#include "amdgpu_ras_eeprom_i2c.h" +#include "amdgpu_ras_mp1_v13_0.h" +#include "amdgpu_ras_nbio_v7_9.h" + +#define MAX_SOCKET_NUM_PER_HIVE 8 +#define MAX_AID_NUM_PER_SOCKET 4 +#define MAX_XCD_NUM_PER_AID 2 + +/* typical ECC bad page rate is 1 bad page per 100MB VRAM */ +#define TYPICAL_ECC_BAD_PAGE_RATE (100ULL * SZ_1M) + +#define COUNT_BAD_PAGE_THRESHOLD(size) (((size) >> 21) << 4) + +/* Reserve 8 physical dram row for possible retirement. + * In worst cases, it will lose 8 * 2MB memory in vram domain + */ +#define RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20) + + +static void ras_mgr_init_event_mgr(struct ras_event_manager *mgr) +{ + struct ras_event_state *event_state; + int i; + + memset(mgr, 0, sizeof(*mgr)); + atomic64_set(&mgr->seqno, 0); + + for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) { + event_state = &mgr->event_state[i]; + event_state->last_seqno = RAS_EVENT_INVALID_ID; + atomic64_set(&event_state->count, 0); + } +} + +static void amdgpu_ras_mgr_init_event_mgr(struct ras_core_context *ras_core) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_event_manager *event_mgr; + struct amdgpu_hive_info *hive; + + hive = amdgpu_get_xgmi_hive(adev); + event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr; + + /* init event manager with node 0 on xgmi system */ + if (!amdgpu_reset_in_recovery(adev)) { + if (!hive || adev->gmc.xgmi.node_id == 0) + ras_mgr_init_event_mgr(event_mgr); + } + + if (hive) + amdgpu_put_xgmi_hive(hive); +} + +static int amdgpu_ras_mgr_init_aca_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_aca_config *aca_cfg = &config->aca_cfg; + + aca_cfg->socket_num_per_hive = MAX_SOCKET_NUM_PER_HIVE; + aca_cfg->aid_num_per_socket = MAX_AID_NUM_PER_SOCKET; + aca_cfg->xcd_num_per_aid = MAX_XCD_NUM_PER_AID; + + return 0; +} + +static int amdgpu_ras_mgr_init_eeprom_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_eeprom_config *eeprom_cfg = &config->eeprom_cfg; + + eeprom_cfg->eeprom_sys_fn = &amdgpu_ras_eeprom_i2c_sys_func; + eeprom_cfg->eeprom_i2c_adapter = adev->pm.ras_eeprom_i2c_bus; + if (eeprom_cfg->eeprom_i2c_adapter) { + const struct i2c_adapter_quirks *quirks = + ((struct i2c_adapter *)eeprom_cfg->eeprom_i2c_adapter)->quirks; + + if (quirks) { + eeprom_cfg->max_i2c_read_len = quirks->max_read_len; + eeprom_cfg->max_i2c_write_len = quirks->max_write_len; + } + } + + /* + * amdgpu_bad_page_threshold is used to config + * the threshold for the number of bad pages. + * -1: Threshold is set to default value + * Driver will issue a warning message when threshold is reached + * and continue runtime services. + * 0: Disable bad page retirement + * Driver will not retire bad pages + * which is intended for debugging purpose. + * -2: Threshold is determined by a formula + * that assumes 1 bad page per 100M of local memory. + * Driver will continue runtime services when threhold is reached. 
+ * 0 < threshold < max number of bad page records in EEPROM, + * A user-defined threshold is set + * Driver will halt runtime services when this custom threshold is reached. + */ + if (amdgpu_bad_page_threshold == NONSTOP_OVER_THRESHOLD) + eeprom_cfg->eeprom_record_threshold_count = + div64_u64(adev->gmc.mc_vram_size, TYPICAL_ECC_BAD_PAGE_RATE); + else if (amdgpu_bad_page_threshold == WARN_NONSTOP_OVER_THRESHOLD) + eeprom_cfg->eeprom_record_threshold_count = + COUNT_BAD_PAGE_THRESHOLD(RAS_RESERVED_VRAM_SIZE_DEFAULT); + else + eeprom_cfg->eeprom_record_threshold_count = amdgpu_bad_page_threshold; + + eeprom_cfg->eeprom_record_threshold_config = amdgpu_bad_page_threshold; + + return 0; +} + +static int amdgpu_ras_mgr_init_mp1_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_mp1_config *mp1_cfg = &config->mp1_cfg; + int ret = 0; + + switch (config->mp1_ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + mp1_cfg->mp1_sys_fn = &amdgpu_ras_mp1_sys_func_v13_0; + break; + default: + RAS_DEV_ERR(adev, + "The mp1(0x%x) ras config is not right!\n", + config->mp1_ip_version); + ret = -EINVAL; + break; + } + + return ret; +} + +static int amdgpu_ras_mgr_init_nbio_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_nbio_config *nbio_cfg = &config->nbio_cfg; + int ret = 0; + + switch (config->nbio_ip_version) { + case IP_VERSION(7, 9, 0): + case IP_VERSION(7, 9, 1): + nbio_cfg->nbio_sys_fn = &amdgpu_ras_nbio_sys_func_v7_9; + break; + default: + RAS_DEV_ERR(adev, + "The nbio(0x%x) ras config is not right!\n", + config->nbio_ip_version); + ret = -EINVAL; + break; + } + + return ret; +} + +static int amdgpu_ras_mgr_get_ras_psp_system_status(struct ras_core_context *ras_core, + struct ras_psp_sys_status *status) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ta_context *context = &adev->psp.ras_context.context; + + status->initialized = context->initialized; + status->session_id = context->session_id; + status->psp_cmd_mutex = &adev->psp.mutex; + + return 0; +} + +static int amdgpu_ras_mgr_get_ras_ta_init_param(struct ras_core_context *ras_core, + struct ras_ta_init_param *ras_ta_param) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t nps_mode; + + if (amdgpu_ras_is_poison_mode_supported(adev)) + ras_ta_param->poison_mode_en = 1; + + if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) + ras_ta_param->dgpu_mode = 1; + + ras_ta_param->xcc_mask = adev->gfx.xcc_mask; + ras_ta_param->channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; + + ras_ta_param->active_umc_mask = adev->umc.active_mask; + + if (!amdgpu_ras_mgr_get_curr_nps_mode(adev, &nps_mode)) + ras_ta_param->nps_mode = nps_mode; + + return 0; +} + +const struct ras_psp_sys_func amdgpu_ras_psp_sys_func = { + .get_ras_psp_system_status = amdgpu_ras_mgr_get_ras_psp_system_status, + .get_ras_ta_init_param = amdgpu_ras_mgr_get_ras_ta_init_param, +}; + +static int amdgpu_ras_mgr_init_psp_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_psp_config *psp_cfg = &config->psp_cfg; + + psp_cfg->psp_sys_fn = &amdgpu_ras_psp_sys_func; + + return 0; +} + +static int amdgpu_ras_mgr_init_umc_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_umc_config *umc_cfg = &config->umc_cfg; + + umc_cfg->umc_vram_type = adev->gmc.vram_type; + + return 0; +} + +static struct ras_core_context 
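The threshold setup in amdgpu_ras_mgr_init_eeprom_config() maps the amdgpu_bad_page_threshold module parameter onto a record count. Two of the formulas can be evaluated directly: the "one bad page per 100 MB of VRAM" estimate (TYPICAL_ECC_BAD_PAGE_RATE) and COUNT_BAD_PAGE_THRESHOLD() applied to the 16 MB default reservation. A worked sketch, assuming a hypothetical 192 GB VRAM part:

#include <stdint.h>

#define SZ_1M_SKETCH (1ULL << 20)
#define TYPICAL_RATE_SKETCH (100ULL * SZ_1M_SKETCH) /* 1 bad page per 100 MB */
#define COUNT_BAD_PAGE_THRESHOLD_SKETCH(size) (((size) >> 21) << 4)

static uint64_t threshold_examples_sketch(void)
{
        uint64_t vram_size = 192ULL << 30;  /* hypothetical 192 GB board */
        uint64_t reserved  = 16ULL << 20;   /* RAS_RESERVED_VRAM_SIZE_DEFAULT */

        /* 192 GB / 100 MB ~= 1966 retirable pages under the rate estimate. */
        uint64_t by_rate = vram_size / TYPICAL_RATE_SKETCH;

        /* (16 MB >> 21) << 4 = 8 << 4 = 128 records: sixteen per 2 MB row. */
        uint64_t by_reservation = COUNT_BAD_PAGE_THRESHOLD_SKETCH(reserved);

        return by_rate + by_reservation;    /* 1966 + 128, illustration only */
}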
*amdgpu_ras_mgr_create_ras_core(struct amdgpu_device *adev) +{ + struct ras_core_config init_config; + + memset(&init_config, 0, sizeof(init_config)); + + init_config.umc_ip_version = amdgpu_ip_version(adev, UMC_HWIP, 0); + init_config.mp1_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0); + init_config.gfx_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0); + init_config.nbio_ip_version = amdgpu_ip_version(adev, NBIO_HWIP, 0); + init_config.psp_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0); + + if (init_config.umc_ip_version == IP_VERSION(12, 0, 0) || + init_config.umc_ip_version == IP_VERSION(12, 5, 0)) + init_config.aca_ip_version = IP_VERSION(1, 0, 0); + + init_config.sys_fn = &amdgpu_ras_sys_fn; + init_config.ras_eeprom_supported = true; + init_config.poison_supported = + amdgpu_ras_is_poison_mode_supported(adev); + + amdgpu_ras_mgr_init_aca_config(adev, &init_config); + amdgpu_ras_mgr_init_eeprom_config(adev, &init_config); + amdgpu_ras_mgr_init_mp1_config(adev, &init_config); + amdgpu_ras_mgr_init_nbio_config(adev, &init_config); + amdgpu_ras_mgr_init_psp_config(adev, &init_config); + amdgpu_ras_mgr_init_umc_config(adev, &init_config); + + return ras_core_create(&init_config); +} + +static int amdgpu_ras_mgr_sw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr; + int ret = 0; + + /* Disabled by default */ + con->uniras_enabled = false; + + /* Enabled only in debug mode */ + if (adev->debug_enable_ras_aca) { + con->uniras_enabled = true; + RAS_DEV_INFO(adev, "Debug amdgpu uniras!"); + } + + if (!con->uniras_enabled) + return 0; + + ras_mgr = kzalloc(sizeof(*ras_mgr), GFP_KERNEL); + if (!ras_mgr) + return -EINVAL; + + con->ras_mgr = ras_mgr; + ras_mgr->adev = adev; + + ras_mgr->ras_core = amdgpu_ras_mgr_create_ras_core(adev); + if (!ras_mgr->ras_core) { + RAS_DEV_ERR(adev, "Failed to create ras core!\n"); + ret = -EINVAL; + goto err; + } + + ras_mgr->ras_core->dev = adev; + + amdgpu_ras_process_init(adev); + ras_core_sw_init(ras_mgr->ras_core); + amdgpu_ras_mgr_init_event_mgr(ras_mgr->ras_core); + return 0; + +err: + kfree(ras_mgr); + return ret; +} + +static int amdgpu_ras_mgr_sw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr = (struct amdgpu_ras_mgr *)con->ras_mgr; + + if (!con->uniras_enabled) + return 0; + + if (!ras_mgr) + return 0; + + amdgpu_ras_process_fini(adev); + ras_core_sw_fini(ras_mgr->ras_core); + ras_core_destroy(ras_mgr->ras_core); + ras_mgr->ras_core = NULL; + + kfree(con->ras_mgr); + con->ras_mgr = NULL; + + return 0; +} + +static int amdgpu_ras_mgr_hw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + int ret; + + if (!con->uniras_enabled) + return 0; + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + ret = ras_core_hw_init(ras_mgr->ras_core); + if (ret) { + RAS_DEV_ERR(adev, "Failed to initialize ras core!\n"); + return ret; + } + + ras_mgr->ras_is_ready = true; + + amdgpu_enable_uniras(adev, true); + + RAS_DEV_INFO(adev, "AMDGPU RAS Is Ready.\n"); + return 0; +} + +static int amdgpu_ras_mgr_hw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = 
amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!con->uniras_enabled) + return 0; + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + ras_core_hw_fini(ras_mgr->ras_core); + + ras_mgr->ras_is_ready = false; + + return 0; +} + +struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(struct amdgpu_device *adev) +{ + if (!adev || !adev->psp.ras_context.ras) + return NULL; + + return (struct amdgpu_ras_mgr *)adev->psp.ras_context.ras->ras_mgr; +} + +static const struct amd_ip_funcs __maybe_unused ras_v1_0_ip_funcs = { + .name = "ras_v1_0", + .sw_init = amdgpu_ras_mgr_sw_init, + .sw_fini = amdgpu_ras_mgr_sw_fini, + .hw_init = amdgpu_ras_mgr_hw_init, + .hw_fini = amdgpu_ras_mgr_hw_fini, +}; + +const struct amdgpu_ip_block_version ras_v1_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_RAS, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &ras_v1_0_ip_funcs, +}; + +int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core) + return -EPERM; + + if (amdgpu_sriov_vf(adev)) + return -EPERM; + + RAS_DEV_INFO(adev, "Enable amdgpu unified ras!"); + return ras_core_set_status(ras_mgr->ras_core, enable); +} + +bool amdgpu_uniras_enabled(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core) + return false; + + if (amdgpu_sriov_vf(adev)) + return false; + + return ras_core_is_enabled(ras_mgr->ras_core); +} + +static bool amdgpu_ras_mgr_is_ready(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (ras_mgr && ras_mgr->ras_core && ras_mgr->ras_is_ready && + ras_core_is_ready(ras_mgr->ras_core)) + return true; + + return false; +} + +int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return ras_core_handle_nbio_irq(ras_mgr->ras_core, data); +} + +uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev, + enum ras_seqno_type seqno_type) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + int ret; + uint64_t seq_no; + + if (!amdgpu_ras_mgr_is_ready(adev) || + (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)) + return 0; + + seq_no = ras_core_gen_seqno(ras_mgr->ras_core, seqno_type); + + if ((seqno_type == RAS_SEQNO_TYPE_DE) || + (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)) { + ret = ras_core_put_seqno(ras_mgr->ras_core, seqno_type, seq_no); + if (ret) + RAS_DEV_WARN(adev, "There are too many ras interrupts!"); + } + + return seq_no; +} + +int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_ih_info *ih_info = (struct ras_ih_info *)data; + uint64_t seq_no = 0; + int ret = 0; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + if (ih_info && (ih_info->block == AMDGPU_RAS_BLOCK__UMC)) { + if (ras_mgr->ras_core->poison_supported) { + seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_DE); + RAS_DEV_INFO(adev, + "{%llu} RAS poison is created, no user action is needed.\n", + seq_no); + } + + ret = amdgpu_ras_process_handle_umc_interrupt(adev, ih_info); + } else if (ras_mgr->ras_core->poison_supported) { + ret = amdgpu_ras_process_handle_unexpected_interrupt(adev, ih_info); 
+ } else { + RAS_DEV_WARN(adev, + "No RAS interrupt handler for non-UMC block with poison disabled.\n"); + } + + return ret; +} + +int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data) +{ + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return amdgpu_ras_process_handle_consumption_interrupt(adev, data); +} + +int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return ras_core_update_ecc_info(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + con->gpu_reset_flags |= flags; + return amdgpu_ras_reset_gpu(adev); +} + +bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return false; + + return ras_eeprom_check_safety_watermark(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, + uint32_t *nps_mode) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + uint32_t mode; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EINVAL; + + mode = ras_core_get_curr_nps_mode(ras_mgr->ras_core); + if (!mode || mode > AMDGPU_NPS8_PARTITION_MODE) + return -EINVAL; + + *nps_mode = mode; + + return 0; +} + +bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev, + uint64_t addr) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return false; + + return ras_umc_check_retired_addr(ras_mgr->ras_core, addr); +} + +bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core || !ras_mgr->ras_is_ready) + return false; + + return ras_core_gpu_is_rma(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev, + uint32_t cmd_id, void *input, uint32_t input_size, + void *output, uint32_t out_size) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_cmd_ctx *cmd_ctx; + uint32_t ctx_buf_size = PAGE_SIZE; + int ret; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + cmd_ctx = kzalloc(ctx_buf_size, GFP_KERNEL); + if (!cmd_ctx) + return -ENOMEM; + + cmd_ctx->cmd_id = cmd_id; + + memcpy(cmd_ctx->input_buff_raw, input, input_size); + cmd_ctx->input_size = input_size; + cmd_ctx->output_buf_size = ctx_buf_size - sizeof(*cmd_ctx); + + ret = amdgpu_ras_submit_cmd(ras_mgr->ras_core, cmd_ctx); + if (!ret && !cmd_ctx->cmd_res && output && (out_size == cmd_ctx->output_size)) + memcpy(output, cmd_ctx->output_buff_raw, cmd_ctx->output_size); + + kfree(cmd_ctx); + + return ret; +} + +int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_mgr_is_ready(adev)) { + RAS_DEV_ERR(adev, "Invalid ras suspend!\n"); + return -EPERM; + } + + amdgpu_ras_process_pre_reset(adev); + return 0; +} + +int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_mgr_is_ready(adev)) { + RAS_DEV_ERR(adev, "Invalid ras resume!\n"); + return -EPERM; + } + + amdgpu_ras_process_post_reset(adev); + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h new file mode 
100644 index 000000000000..8fb7eb4b8f13 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __AMDGPU_RAS_MGR_H__ +#define __AMDGPU_RAS_MGR_H__ +#include "ras.h" +#include "amdgpu_ras_process.h" + +enum ras_ih_type { + RAS_IH_NONE, + RAS_IH_FROM_BLOCK_CONTROLLER, + RAS_IH_FROM_CONSUMER_CLIENT, + RAS_IH_FROM_FATAL_ERROR, +}; + +struct ras_ih_info { + uint32_t block; + union { + struct amdgpu_iv_entry iv_entry; + struct { + uint16_t pasid; + uint32_t reset; + pasid_notify pasid_fn; + void *data; + }; + }; +}; + +struct amdgpu_ras_mgr { + struct amdgpu_device *adev; + struct ras_core_context *ras_core; + struct delayed_work retire_page_dwork; + struct ras_event_manager ras_event_mgr; + uint64_t last_poison_consumption_seqno; + bool ras_is_ready; + + bool is_paused; + struct completion ras_event_done; +}; + +extern const struct amdgpu_ip_block_version ras_v1_0_ip_block; + +struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context( + struct amdgpu_device *adev); +int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable); +bool amdgpu_uniras_enabled(struct amdgpu_device *adev); +int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev); +int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags); +uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev, + enum ras_seqno_type seqno_type); +bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev); +int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, uint32_t *nps_mode); +bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev, + uint64_t addr); +bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev); +int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev, + uint32_t cmd_id, void *input, uint32_t input_size, + void *output, uint32_t out_size); +int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev); +int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c new file 
mode 100644 index 000000000000..79a51b1603ac --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu_smu.h" +#include "amdgpu_reset.h" +#include "amdgpu_ras_mp1_v13_0.h" + +#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A +#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B + +static int mp1_v13_0_get_valid_bank_count(struct ras_core_context *ras_core, + u32 msg, u32 *count) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + u32 smu_msg; + int ret = 0; + + if (!count) + return -EINVAL; + + smu_msg = (msg == RAS_MP1_MSG_QueryValidMcaCeCount) ? + SMU_MSG_QueryValidMcaCeCount : SMU_MSG_QueryValidMcaCount; + + if (down_read_trylock(&adev->reset_domain->sem)) { + ret = amdgpu_smu_ras_send_msg(adev, smu_msg, 0, count); + up_read(&adev->reset_domain->sem); + } else { + ret = -RAS_CORE_GPU_IN_MODE1_RESET; + } + + if (ret) + *count = 0; + + return ret; +} + +static int mp1_v13_0_dump_valid_bank(struct ras_core_context *ras_core, + u32 msg, u32 idx, u32 reg_idx, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t data[2] = {0, 0}; + uint32_t param; + int ret = 0; + int i, offset; + u32 smu_msg = (msg == RAS_MP1_MSG_McaBankCeDumpDW) ? 
+ SMU_MSG_McaBankCeDumpDW : SMU_MSG_McaBankDumpDW; + + if (down_read_trylock(&adev->reset_domain->sem)) { + offset = reg_idx * 8; + for (i = 0; i < ARRAY_SIZE(data); i++) { + param = ((idx & 0xffff) << 16) | ((offset + (i << 2)) & 0xfffc); + ret = amdgpu_smu_ras_send_msg(adev, smu_msg, param, &data[i]); + if (ret) { + RAS_DEV_ERR(adev, "ACA failed to read register[%d], offset:0x%x\n", + reg_idx, offset); + break; + } + } + up_read(&adev->reset_domain->sem); + + if (!ret) + *val = (uint64_t)data[1] << 32 | data[0]; + } else { + ret = -RAS_CORE_GPU_IN_MODE1_RESET; + } + + return ret; +} + +const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0 = { + .mp1_get_valid_bank_count = mp1_v13_0_get_valid_bank_count, + .mp1_dump_valid_bank = mp1_v13_0_dump_valid_bank, +}; + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h new file mode 100644 index 000000000000..71c614ae1ae4 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMDGPU_RAS_MP1_V13_0_H__ +#define __AMDGPU_RAS_MP1_V13_0_H__ +#include "ras.h" + +extern const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c new file mode 100644 index 000000000000..2783f5875c7c --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_nbio_v7_9.h" +#include "nbio/nbio_7_9_0_offset.h" +#include "nbio/nbio_7_9_0_sh_mask.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" + +static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + /* Dummy function, there is no initialization operation in the driver */ + + return 0; +} + +static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for ras_controller_irq should be written + * to the BIF ring instead of the general iv ring. However, due to a known BIF ring + * hw bug, it has to be disabled. There is no chance the process function + * will be invoked. Just leave it as a dummy one. + */ + return 0; +} + +static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + /* Dummy function, there is no initialization operation in the driver */ + + return 0; +} + +static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for err_event_athub_irq should be written + * to the BIF ring instead of the general iv ring. However, due to a known BIF ring + * hw bug, it has to be disabled. There is no chance the process function + * will be invoked. Just leave it as a dummy one. 
+ */ + return 0; +} + +static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = { + .set = nbio_v7_9_set_ras_controller_irq_state, + .process = nbio_v7_9_process_ras_controller_irq, +}; + +static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = { + .set = nbio_v7_9_set_ras_err_event_athub_irq_state, + .process = nbio_v7_9_process_err_event_athub_irq, +}; + +static int nbio_v7_9_init_ras_controller_interrupt(struct ras_core_context *ras_core, bool state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int r; + + /* init the irq funcs */ + adev->nbio.ras_controller_irq.funcs = + &nbio_v7_9_ras_controller_irq_funcs; + adev->nbio.ras_controller_irq.num_types = 1; + + /* register ras controller interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, + &adev->nbio.ras_controller_irq); + + return r; +} + +static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct ras_core_context *ras_core, + bool state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int r; + + /* init the irq funcs */ + adev->nbio.ras_err_event_athub_irq.funcs = + &nbio_v7_9_ras_err_event_athub_irq_funcs; + adev->nbio.ras_err_event_athub_irq.num_types = 1; + + /* register ras err event athub interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, + &adev->nbio.ras_err_event_athub_irq); + + return r; +} + +const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9 = { + .set_ras_controller_irq_state = nbio_v7_9_init_ras_controller_interrupt, + .set_ras_err_event_athub_irq_state = nbio_v7_9_init_ras_err_event_athub_interrupt, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h new file mode 100644 index 000000000000..272259e9a0e7 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_RAS_NBIO_V7_9_H__ +#define __AMDGPU_RAS_NBIO_V7_9_H__ + +extern const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9; + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c new file mode 100644 index 000000000000..5782c007de71 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "amdgpu.h" +#include "amdgpu_reset.h" +#include "amdgpu_xgmi.h" +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_process.h" + +#define RAS_MGR_RETIRE_PAGE_INTERVAL 100 +#define RAS_EVENT_PROCESS_TIMEOUT 1200 + +static void ras_process_retire_page_dwork(struct work_struct *work) +{ + struct amdgpu_ras_mgr *ras_mgr = + container_of(work, struct amdgpu_ras_mgr, retire_page_dwork.work); + struct amdgpu_device *adev = ras_mgr->adev; + int ret; + + if (amdgpu_ras_is_rma(adev)) + return; + + /* If gpu reset is ongoing, delay retiring the bad pages */ + if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) { + schedule_delayed_work(&ras_mgr->retire_page_dwork, + msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL * 3)); + return; + } + + ret = ras_umc_handle_bad_pages(ras_mgr->ras_core, NULL); + if (!ret) + schedule_delayed_work(&ras_mgr->retire_page_dwork, + msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL)); +} + +int amdgpu_ras_process_init(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + ras_mgr->is_paused = false; + init_completion(&ras_mgr->ras_event_done); + + INIT_DELAYED_WORK(&ras_mgr->retire_page_dwork, ras_process_retire_page_dwork); + + return 0; +} + +int amdgpu_ras_process_fini(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + ras_mgr->is_paused = false; + /* Save all cached bad pages to eeprom */ + flush_delayed_work(&ras_mgr->retire_page_dwork); + cancel_delayed_work_sync(&ras_mgr->retire_page_dwork); + return 0; +} + +int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr->ras_core) + return -EINVAL; + + return ras_process_add_interrupt_req(ras_mgr->ras_core, NULL, true); +} + +int 
amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, void *data) +{ + amdgpu_ras_set_fed(adev, true); + return amdgpu_ras_mgr_reset_gpu(adev, AMDGPU_RAS_GPU_RESET_MODE1_RESET); +} + +int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_ih_info *ih_info = (struct ras_ih_info *)data; + struct ras_event_req req; + uint64_t seqno; + + if (!ih_info) + return -EINVAL; + + memset(&req, 0, sizeof(req)); + req.block = ih_info->block; + req.data = ih_info->data; + req.pasid = ih_info->pasid; + req.pasid_fn = ih_info->pasid_fn; + req.reset = ih_info->reset; + + seqno = ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, false); + + /* When the ACA register cannot be read from FW, the poison + * consumption seqno in the fifo will not pop up, so it is + * necessary to check whether the seqno is the previous seqno. + */ + if (seqno == ras_mgr->last_poison_consumption_seqno) { + /* Pop and discard the previous seqno */ + ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, true); + seqno = ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, false); + } + ras_mgr->last_poison_consumption_seqno = seqno; + req.seqno = seqno; + + return ras_process_add_interrupt_req(ras_mgr->ras_core, &req, false); +} + +int amdgpu_ras_process_begin(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (ras_mgr->is_paused) + return -EAGAIN; + + reinit_completion(&ras_mgr->ras_event_done); + return 0; +} + +int amdgpu_ras_process_end(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + complete(&ras_mgr->ras_event_done); + return 0; +} + +int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + long rc; + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + if (!ras_mgr->ras_core->is_initialized) + return -EPERM; + + ras_mgr->is_paused = true; + + /* Wait for RAS event processing to complete */ + rc = wait_for_completion_interruptible_timeout(&ras_mgr->ras_event_done, + msecs_to_jiffies(RAS_EVENT_PROCESS_TIMEOUT)); + if (rc <= 0) + RAS_DEV_WARN(adev, "Waiting for ras process to complete %s\n", + rc ? "interrupted" : "timeout"); + + flush_delayed_work(&ras_mgr->retire_page_dwork); + return 0; +} + +int amdgpu_ras_process_post_reset(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + if (!ras_mgr->ras_core->is_initialized) + return -EPERM; + + ras_mgr->is_paused = false; + + schedule_delayed_work(&ras_mgr->retire_page_dwork, 0); + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h new file mode 100644 index 000000000000..d55cdaeac441 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __AMDGPU_RAS_PROCESS_H__ +#define __AMDGPU_RAS_PROCESS_H__ +#include "ras_process.h" +#include "amdgpu_ras_mgr.h" + +enum ras_ih_type; +int amdgpu_ras_process_init(struct amdgpu_device *adev); +int amdgpu_ras_process_fini(struct amdgpu_device *adev); +int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, + void *data); +int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, + void *data); +int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, + void *data); +int amdgpu_ras_process_begin(struct amdgpu_device *adev); +int amdgpu_ras_process_end(struct amdgpu_device *adev); +int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev); +int amdgpu_ras_process_post_reset(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c new file mode 100644 index 000000000000..45ed8c3b5563 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras.h" +#include "amdgpu_reset.h" + +static int amdgpu_ras_sys_detect_fatal_event(struct ras_core_context *ras_core, void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + uint64_t seq_no; + + ret = amdgpu_ras_global_ras_isr(adev); + if (ret) + return ret; + + seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_UE); + RAS_DEV_INFO(adev, + "{%llu} Uncorrectable hardware error(ERREVENT_ATHUB_INTERRUPT) detected!\n", + seq_no); + + return amdgpu_ras_process_handle_unexpected_interrupt(adev, data); +} + +static int amdgpu_ras_sys_poison_consumption_event(struct ras_core_context *ras_core, + void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_event_req *req = (struct ras_event_req *)data; + pasid_notify pasid_fn; + + if (!req) + return -EINVAL; + + if (req->pasid_fn) { + pasid_fn = (pasid_notify)req->pasid_fn; + pasid_fn(adev, req->pasid, req->data); + } + + return 0; +} + +static int amdgpu_ras_sys_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t *seqno) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_event_manager *event_mgr; + struct ras_event_state *event_state; + struct amdgpu_hive_info *hive; + enum ras_event_type event_type; + uint64_t seq_no; + + if (!ras_mgr || !seqno || + (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)) + return -EINVAL; + + switch (seqno_type) { + case RAS_SEQNO_TYPE_UE: + event_type = RAS_EVENT_TYPE_FATAL; + break; + case RAS_SEQNO_TYPE_CE: + case RAS_SEQNO_TYPE_DE: + event_type = RAS_EVENT_TYPE_POISON_CREATION; + break; + case RAS_SEQNO_TYPE_POISON_CONSUMPTION: + event_type = RAS_EVENT_TYPE_POISON_CONSUMPTION; + break; + default: + event_type = RAS_EVENT_TYPE_INVALID; + break; + } + + hive = amdgpu_get_xgmi_hive(adev); + event_mgr = hive ? 
&hive->event_mgr : &ras_mgr->ras_event_mgr; + event_state = &event_mgr->event_state[event_type]; + if ((event_type == RAS_EVENT_TYPE_FATAL) && amdgpu_ras_in_recovery(adev)) { + seq_no = event_state->last_seqno; + } else { + seq_no = atomic64_inc_return(&event_mgr->seqno); + event_state->last_seqno = seq_no; + atomic64_inc(&event_state->count); + } + amdgpu_put_xgmi_hive(hive); + + *seqno = seq_no; + return 0; + +} + +static int amdgpu_ras_sys_event_notifier(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(ras_core->dev); + int ret = 0; + + switch (event_id) { + case RAS_EVENT_ID__BAD_PAGE_DETECTED: + schedule_delayed_work(&ras_mgr->retire_page_dwork, 0); + break; + case RAS_EVENT_ID__POISON_CONSUMPTION: + amdgpu_ras_sys_poison_consumption_event(ras_core, data); + break; + case RAS_EVENT_ID__RESERVE_BAD_PAGE: + ret = amdgpu_ras_reserve_page(ras_core->dev, *(uint64_t *)data); + break; + case RAS_EVENT_ID__FATAL_ERROR_DETECTED: + ret = amdgpu_ras_sys_detect_fatal_event(ras_core, data); + break; + case RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM: + ret = amdgpu_dpm_send_hbm_bad_pages_num(ras_core->dev, *(uint32_t *)data); + break; + case RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP: + ret = amdgpu_dpm_send_hbm_bad_channel_flag(ras_core->dev, *(uint32_t *)data); + break; + case RAS_EVENT_ID__DEVICE_RMA: + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_RMA, NULL, NULL); + ret = amdgpu_dpm_send_rma_reason(ras_core->dev); + break; + case RAS_EVENT_ID__RESET_GPU: + ret = amdgpu_ras_mgr_reset_gpu(ras_core->dev, *(uint32_t *)data); + break; + case RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN: + ret = amdgpu_ras_process_begin(ras_core->dev); + break; + case RAS_EVENT_ID__RAS_EVENT_PROC_END: + ret = amdgpu_ras_process_end(ras_core->dev); + break; + default: + RAS_DEV_WARN(ras_core->dev, "Invalid ras notify event:%d\n", event_id); + break; + } + + return ret; +} + +static u64 amdgpu_ras_sys_get_utc_second_timestamp(struct ras_core_context *ras_core) +{ + return ktime_get_real_seconds(); +} + +static int amdgpu_ras_sys_check_gpu_status(struct ras_core_context *ras_core, + uint32_t *status) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t gpu_status = 0; + + if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) + gpu_status |= RAS_GPU_STATUS__IN_RESET; + + if (amdgpu_sriov_vf(adev)) + gpu_status |= RAS_GPU_STATUS__IS_VF; + + *status = gpu_status; + + return 0; +} + +static int amdgpu_ras_sys_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + + dev_info->device_id = adev->pdev->device; + dev_info->vendor_id = adev->pdev->vendor; + dev_info->socket_id = adev->smuio.funcs->get_socket_id(adev); + + return 0; +} + +static int amdgpu_ras_sys_gpu_reset_lock(struct ras_core_context *ras_core, + bool down, bool try) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret = 0; + + if (down && try) + ret = down_read_trylock(&adev->reset_domain->sem); + else if (down) + down_read(&adev->reset_domain->sem); + else + up_read(&adev->reset_domain->sem); + + return ret; +} + +static bool amdgpu_ras_sys_detect_ras_interrupt(struct ras_core_context *ras_core) +{ + return !!atomic_read(&amdgpu_ras_in_intr); +} + +static int amdgpu_ras_sys_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + struct 
amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct psp_context *psp = &adev->psp; + struct psp_ring *psp_ring; + struct ta_mem_context *mem_ctx; + + if (mem_type == GPU_MEM_TYPE_RAS_PSP_RING) { + psp_ring = &psp->km_ring; + gpu_mem->mem_bo = adev->firmware.rbuf; + gpu_mem->mem_size = psp_ring->ring_size; + gpu_mem->mem_mc_addr = psp_ring->ring_mem_mc_addr; + gpu_mem->mem_cpu_addr = psp_ring->ring_mem; + } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_CMD) { + gpu_mem->mem_bo = psp->cmd_buf_bo; + gpu_mem->mem_size = PSP_CMD_BUFFER_SIZE; + gpu_mem->mem_mc_addr = psp->cmd_buf_mc_addr; + gpu_mem->mem_cpu_addr = psp->cmd_buf_mem; + } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_FENCE) { + gpu_mem->mem_bo = psp->fence_buf_bo; + gpu_mem->mem_size = PSP_FENCE_BUFFER_SIZE; + gpu_mem->mem_mc_addr = psp->fence_buf_mc_addr; + gpu_mem->mem_cpu_addr = psp->fence_buf; + } else if (mem_type == GPU_MEM_TYPE_RAS_TA_FW) { + gpu_mem->mem_bo = psp->fw_pri_bo; + gpu_mem->mem_size = PSP_1_MEG; + gpu_mem->mem_mc_addr = psp->fw_pri_mc_addr; + gpu_mem->mem_cpu_addr = psp->fw_pri_buf; + } else if (mem_type == GPU_MEM_TYPE_RAS_TA_CMD) { + mem_ctx = &psp->ras_context.context.mem_context; + gpu_mem->mem_bo = mem_ctx->shared_bo; + gpu_mem->mem_size = mem_ctx->shared_mem_size; + gpu_mem->mem_mc_addr = mem_ctx->shared_mc_addr; + gpu_mem->mem_cpu_addr = mem_ctx->shared_buf; + } else { + return -EINVAL; + } + + if (!gpu_mem->mem_bo || !gpu_mem->mem_size || + !gpu_mem->mem_mc_addr || !gpu_mem->mem_cpu_addr) { + RAS_DEV_ERR(ras_core->dev, "The ras psp gpu memory is invalid!\n"); + return -ENOMEM; + } + + return 0; +} + +static int amdgpu_ras_sys_put_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + + return 0; +} + +const struct ras_sys_func amdgpu_ras_sys_fn = { + .ras_notifier = amdgpu_ras_sys_event_notifier, + .get_utc_second_timestamp = amdgpu_ras_sys_get_utc_second_timestamp, + .gen_seqno = amdgpu_ras_sys_gen_seqno, + .check_gpu_status = amdgpu_ras_sys_check_gpu_status, + .get_device_system_info = amdgpu_ras_sys_get_device_system_info, + .gpu_reset_lock = amdgpu_ras_sys_gpu_reset_lock, + .detect_ras_interrupt = amdgpu_ras_sys_detect_ras_interrupt, + .get_gpu_mem = amdgpu_ras_sys_get_gpu_mem, + .put_gpu_mem = amdgpu_ras_sys_put_gpu_mem, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h new file mode 100644 index 000000000000..8156531a7b63 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_SYS_H__ +#define __RAS_SYS_H__ +#include <linux/stdarg.h> +#include <linux/printk.h> +#include <linux/dev_printk.h> +#include <linux/mempool.h> +#include "amdgpu.h" + +#define RAS_DEV_ERR(device, fmt, ...) \ + do { \ + if (device) \ + dev_err(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_ERR fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_WARN(device, fmt, ...) \ + do { \ + if (device) \ + dev_warn(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_WARNING fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_INFO(device, fmt, ...) \ + do { \ + if (device) \ + dev_info(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_INFO fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_DBG(device, fmt, ...) \ + do { \ + if (device) \ + dev_dbg(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_INFO(fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__) + +#define RAS_DEV_RREG32_SOC15(dev, ip, inst, reg) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + 0, ip##_HWIP, inst); \ +}) + +#define RAS_DEV_WREG32_SOC15(dev, ip, inst, reg, value) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \ + value, 0, ip##_HWIP, inst); \ +}) + +/* GET_INST returns the physical instance corresponding to a logical instance */ +#define RAS_GET_INST(dev, ip, inst) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + adev->ip_map.logical_to_dev_inst ? \ + adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst; \ +}) + +#define RAS_GET_MASK(dev, ip, mask) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + (adev->ip_map.logical_to_dev_mask ? \ + adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask); \ +}) + +static inline void *ras_radix_tree_delete_iter(struct radix_tree_root *root, void *iter) +{ + return radix_tree_delete(root, ((struct radix_tree_iter *)iter)->index); +} + +static inline long ras_wait_event_interruptible_timeout(void *wq_head, + int (*condition)(void *param), void *param, unsigned int timeout) +{ + return wait_event_interruptible_timeout(*(wait_queue_head_t *)wq_head, + condition(param), timeout); +} + +extern const struct ras_sys_func amdgpu_ras_sys_fn; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile index e69de29bb2d1..e826a1f86424 100644 --- a/drivers/gpu/drm/amd/ras/rascore/Makefile +++ b/drivers/gpu/drm/amd/ras/rascore/Makefile @@ -0,0 +1,44 @@ +# +# Copyright 2025 Advanced Micro Devices, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +RAS_CORE_FILES = ras_core.o \ + ras_mp1.o \ + ras_mp1_v13_0.o \ + ras_aca.o \ + ras_aca_v1_0.o \ + ras_eeprom.o \ + ras_umc.o \ + ras_umc_v12_0.o \ + ras_cmd.o \ + ras_gfx.o \ + ras_gfx_v9_0.o \ + ras_process.o \ + ras_nbio.o \ + ras_nbio_v7_9.o \ + ras_log_ring.o \ + ras_cper.o \ + ras_psp.o \ + ras_psp_v13_0.o + + +RAS_CORE = $(addprefix $(AMD_GPU_RAS_PATH)/rascore/,$(RAS_CORE_FILES)) + +AMD_GPU_RAS_FILES += $(RAS_CORE) diff --git a/drivers/gpu/drm/amd/ras/rascore/ras.h b/drivers/gpu/drm/amd/ras/rascore/ras.h new file mode 100644 index 000000000000..3396b2e0949d --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras.h @@ -0,0 +1,370 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_H__ +#define __RAS_H__ +#include "ras_sys.h" +#include "ras_umc.h" +#include "ras_aca.h" +#include "ras_eeprom.h" +#include "ras_core_status.h" +#include "ras_process.h" +#include "ras_gfx.h" +#include "ras_cmd.h" +#include "ras_nbio.h" +#include "ras_mp1.h" +#include "ras_psp.h" +#include "ras_log_ring.h" + +#define RAS_HW_ERR "[Hardware Error]: " + +#define RAS_GPU_PAGE_SHIFT 12 +#define RAS_ADDR_TO_PFN(addr) ((addr) >> RAS_GPU_PAGE_SHIFT) +#define RAS_PFN_TO_ADDR(pfn) ((pfn) << RAS_GPU_PAGE_SHIFT) + +#define RAS_CORE_RESET_GPU 0x10000 + +#define GPU_RESET_CAUSE_POISON (RAS_CORE_RESET_GPU | 0x0001) +#define GPU_RESET_CAUSE_FATAL (RAS_CORE_RESET_GPU | 0x0002) +#define GPU_RESET_CAUSE_RMA (RAS_CORE_RESET_GPU | 0x0004) + +enum ras_block_id { + RAS_BLOCK_ID__UMC = 0, + RAS_BLOCK_ID__SDMA, + RAS_BLOCK_ID__GFX, + RAS_BLOCK_ID__MMHUB, + RAS_BLOCK_ID__ATHUB, + RAS_BLOCK_ID__PCIE_BIF, + RAS_BLOCK_ID__HDP, + RAS_BLOCK_ID__XGMI_WAFL, + RAS_BLOCK_ID__DF, + RAS_BLOCK_ID__SMN, + RAS_BLOCK_ID__SEM, + RAS_BLOCK_ID__MP0, + RAS_BLOCK_ID__MP1, + RAS_BLOCK_ID__FUSE, + RAS_BLOCK_ID__MCA, + RAS_BLOCK_ID__VCN, + RAS_BLOCK_ID__JPEG, + RAS_BLOCK_ID__IH, + RAS_BLOCK_ID__MPIO, + + RAS_BLOCK_ID__LAST +}; + +enum ras_ecc_err_type { + RAS_ECC_ERR__NONE = 0, + RAS_ECC_ERR__PARITY = 1, + RAS_ECC_ERR__SINGLE_CORRECTABLE = 2, + RAS_ECC_ERR__MULTI_UNCORRECTABLE = 4, + RAS_ECC_ERR__POISON = 8, +}; + +enum ras_err_type { + RAS_ERR_TYPE__UE = 0, + RAS_ERR_TYPE__CE, + RAS_ERR_TYPE__DE, + RAS_ERR_TYPE__LAST +}; + +enum ras_seqno_type { + RAS_SEQNO_TYPE_INVALID = 0, + RAS_SEQNO_TYPE_UE, + RAS_SEQNO_TYPE_CE, + RAS_SEQNO_TYPE_DE, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, + RAS_SEQNO_TYPE_COUNT_MAX, +}; + +enum ras_seqno_fifo { + SEQNO_FIFO_INVALID = 0, + SEQNO_FIFO_POISON_CREATION, + SEQNO_FIFO_POISON_CONSUMPTION, + SEQNO_FIFO_COUNT_MAX +}; + +enum ras_notify_event { + RAS_EVENT_ID__NONE, + RAS_EVENT_ID__BAD_PAGE_DETECTED, + RAS_EVENT_ID__POISON_CONSUMPTION, + RAS_EVENT_ID__RESERVE_BAD_PAGE, + RAS_EVENT_ID__DEVICE_RMA, + RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM, + RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP, + RAS_EVENT_ID__FATAL_ERROR_DETECTED, + RAS_EVENT_ID__RESET_GPU, + RAS_EVENT_ID__RESET_VF, + RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN, + RAS_EVENT_ID__RAS_EVENT_PROC_END, +}; + +enum ras_gpu_status { + RAS_GPU_STATUS__NOT_READY = 0, + RAS_GPU_STATUS__READY = 0x1, + RAS_GPU_STATUS__IN_RESET = 0x2, + RAS_GPU_STATUS__IS_RMA = 0x4, + RAS_GPU_STATUS__IS_VF = 0x8, +}; + +struct ras_core_context; +struct ras_bank_ecc; +struct ras_umc; +struct ras_aca; +struct ras_process; +struct ras_nbio; +struct ras_log_ring; +struct ras_psp; + +struct ras_mp1_sys_func { + int (*mp1_get_valid_bank_count)(struct ras_core_context *ras_core, + u32 msg, u32 *count); + int (*mp1_dump_valid_bank)(struct ras_core_context *ras_core, + u32 msg, u32 idx, u32 reg_idx, u64 *val); +}; + +struct ras_eeprom_sys_func { + int (*eeprom_i2c_xfer)(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 buf_size, bool read); + int (*update_eeprom_i2c_config)(struct ras_core_context *ras_core); +}; + +struct ras_nbio_sys_func { + int (*set_ras_controller_irq_state)(struct ras_core_context *ras_core, + bool state); + int (*set_ras_err_event_athub_irq_state)(struct ras_core_context *ras_core, + bool state); +}; + +struct ras_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long tm_year; +}; + +struct device_system_info { + uint32_t device_id; + uint32_t vendor_id; + uint32_t socket_id; +}; + +enum gpu_mem_type { + 
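+ /* GPU memory blocks requested by the RAS core through ras_sys_func->get_gpu_mem(); amdgpu resolves them to the PSP ring, command and fence buffers and to the RAS TA firmware and shared command buffers (see amdgpu_ras_sys_get_gpu_mem). */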
GPU_MEM_TYPE_DEFAULT, + GPU_MEM_TYPE_RAS_PSP_RING, + GPU_MEM_TYPE_RAS_PSP_CMD, + GPU_MEM_TYPE_RAS_PSP_FENCE, + GPU_MEM_TYPE_RAS_TA_FW, + GPU_MEM_TYPE_RAS_TA_CMD, +}; + +struct ras_psp_sys_func { + int (*get_ras_psp_system_status)(struct ras_core_context *ras_core, + struct ras_psp_sys_status *status); + int (*get_ras_ta_init_param)(struct ras_core_context *ras_core, + struct ras_ta_init_param *ras_ta_param); +}; + +struct ras_sys_func { + int (*gpu_reset_lock)(struct ras_core_context *ras_core, + bool down, bool try); + int (*check_gpu_status)(struct ras_core_context *ras_core, + uint32_t *status); + int (*gen_seqno)(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t *seqno); + int (*async_handle_ras_event)(struct ras_core_context *ras_core, void *data); + int (*ras_notifier)(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data); + u64 (*get_utc_second_timestamp)(struct ras_core_context *ras_core); + int (*get_device_system_info)(struct ras_core_context *ras_core, + struct device_system_info *dev_info); + bool (*detect_ras_interrupt)(struct ras_core_context *ras_core); + int (*get_gpu_mem)(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); + int (*put_gpu_mem)(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +}; + +struct ras_ecc_count { + uint64_t new_ce_count; + uint64_t total_ce_count; + uint64_t new_ue_count; + uint64_t total_ue_count; + uint64_t new_de_count; + uint64_t total_de_count; +}; + +struct ras_bank_ecc { + uint32_t nps; + uint64_t seq_no; + uint64_t status; + uint64_t ipid; + uint64_t addr; +}; + +struct ras_bank_ecc_node { + struct list_head node; + struct ras_bank_ecc ecc; +}; + +struct ras_aca_config { + u32 socket_num_per_hive; + u32 aid_num_per_socket; + u32 xcd_num_per_aid; +}; + +struct ras_mp1_config { + const struct ras_mp1_sys_func *mp1_sys_fn; +}; + +struct ras_nbio_config { + const struct ras_nbio_sys_func *nbio_sys_fn; +}; + +struct ras_psp_config { + const struct ras_psp_sys_func *psp_sys_fn; +}; + +struct ras_umc_config { + uint32_t umc_vram_type; +}; + +struct ras_eeprom_config { + const struct ras_eeprom_sys_func *eeprom_sys_fn; + int eeprom_record_threshold_config; + uint32_t eeprom_record_threshold_count; + void *eeprom_i2c_adapter; + u32 eeprom_i2c_addr; + u32 eeprom_i2c_port; + u16 max_i2c_read_len; + u16 max_i2c_write_len; +}; + +struct ras_core_config { + u32 aca_ip_version; + u32 umc_ip_version; + u32 mp1_ip_version; + u32 gfx_ip_version; + u32 nbio_ip_version; + u32 psp_ip_version; + + bool poison_supported; + bool ras_eeprom_supported; + const struct ras_sys_func *sys_fn; + + struct ras_aca_config aca_cfg; + struct ras_mp1_config mp1_cfg; + struct ras_nbio_config nbio_cfg; + struct ras_psp_config psp_cfg; + struct ras_eeprom_config eeprom_cfg; + struct ras_umc_config umc_cfg; +}; + +struct ras_core_context { + void *dev; + struct ras_core_config *config; + u32 socket_num_per_hive; + u32 aid_num_per_socket; + u32 xcd_num_per_aid; + int max_ue_banks_per_query; + int max_ce_banks_per_query; + struct ras_aca ras_aca; + + bool ras_eeprom_supported; + struct ras_eeprom_control ras_eeprom; + + struct ras_psp ras_psp; + struct ras_umc ras_umc; + struct ras_nbio ras_nbio; + struct ras_gfx ras_gfx; + struct ras_mp1 ras_mp1; + struct ras_process ras_proc; + struct ras_cmd_mgr ras_cmd; + struct ras_log_ring ras_log_ring; + + const struct ras_sys_func *sys_fn; + + /* is poison mode supported */ + bool 
poison_supported; + + bool is_rma; + bool is_initialized; + + struct kfifo de_seqno_fifo; + struct kfifo consumption_seqno_fifo; + spinlock_t seqno_lock; + + bool ras_core_enabled; +}; + +struct ras_core_context *ras_core_create(struct ras_core_config *init_config); +void ras_core_destroy(struct ras_core_context *ras_core); +int ras_core_sw_init(struct ras_core_context *ras_core); +int ras_core_sw_fini(struct ras_core_context *ras_core); +int ras_core_hw_init(struct ras_core_context *ras_core); +int ras_core_hw_fini(struct ras_core_context *ras_core); +bool ras_core_is_ready(struct ras_core_context *ras_core); +uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type); +uint64_t ras_core_get_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, bool pop); + +int ras_core_put_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t seqno); + +int ras_core_update_ecc_info(struct ras_core_context *ras_core); +int ras_core_query_block_ecc_data(struct ras_core_context *ras_core, + enum ras_block_id block, struct ras_ecc_count *ecc_count); + +bool ras_core_gpu_in_reset(struct ras_core_context *ras_core); +bool ras_core_gpu_is_rma(struct ras_core_context *ras_core); +bool ras_core_gpu_is_vf(struct ras_core_context *ras_core); +bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data); +int ras_core_handle_fatal_error(struct ras_core_context *ras_core); + +uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core); +const char *ras_core_get_ras_block_name(enum ras_block_id block_id); +int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core, + uint64_t timestamp, struct ras_time *tm); + +int ras_core_set_status(struct ras_core_context *ras_core, bool enable); +bool ras_core_is_enabled(struct ras_core_context *ras_core); +uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core); +int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa); +bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core); +int ras_core_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +int ras_core_put_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +bool ras_core_check_safety_watermark(struct ras_core_context *ras_core); +int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core); +void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core); +void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core); +int ras_core_event_notify(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data); +int ras_core_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c new file mode 100644 index 000000000000..e433c70d2989 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c @@ -0,0 +1,672 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_aca.h" +#include "ras_aca_v1_0.h" +#include "ras_mp1_v13_0.h" + +#define ACA_MARK_FATAL_FLAG 0x100 +#define ACA_MARK_UE_READ_FLAG 0x1 + +#define blk_name(block_id) ras_core_get_ras_block_name(block_id) + +static struct aca_regs_dump { + const char *name; + int reg_idx; +} aca_regs[] = { + {"CONTROL", ACA_REG_IDX__CTL}, + {"STATUS", ACA_REG_IDX__STATUS}, + {"ADDR", ACA_REG_IDX__ADDR}, + {"MISC", ACA_REG_IDX__MISC0}, + {"CONFIG", ACA_REG_IDX__CONFG}, + {"IPID", ACA_REG_IDX__IPID}, + {"SYND", ACA_REG_IDX__SYND}, + {"DESTAT", ACA_REG_IDX__DESTAT}, + {"DEADDR", ACA_REG_IDX__DEADDR}, + {"CONTROL_MASK", ACA_REG_IDX__CTL_MASK}, +}; + + +static void aca_report_ecc_info(struct ras_core_context *ras_core, + u64 seq_no, u32 blk, u32 skt, u32 aid, + struct aca_aid_ecc *aid_ecc, + struct aca_bank_ecc *new_ecc) +{ + struct aca_ecc_count ecc_count = {0}; + + ecc_count.new_ue_count = new_ecc->ue_count; + ecc_count.new_de_count = new_ecc->de_count; + ecc_count.new_ce_count = new_ecc->ce_count; + if (blk == RAS_BLOCK_ID__GFX) { + struct aca_ecc_count *xcd_ecc; + int xcd_id; + + for (xcd_id = 0; xcd_id < aid_ecc->xcd.xcd_num; xcd_id++) { + xcd_ecc = &aid_ecc->xcd.xcd[xcd_id].ecc_err; + ecc_count.total_ue_count += xcd_ecc->total_ue_count; + ecc_count.total_de_count += xcd_ecc->total_de_count; + ecc_count.total_ce_count += xcd_ecc->total_ce_count; + } + } else { + ecc_count.total_ue_count = aid_ecc->ecc_err.total_ue_count; + ecc_count.total_de_count = aid_ecc->ecc_err.total_de_count; + ecc_count.total_ce_count = aid_ecc->ecc_err.total_ce_count; + } + + if (ecc_count.new_ue_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new uncorrectable hardware errors detected in %s block\n", + seq_no, skt, aid, ecc_count.new_ue_count, blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u uncorrectable hardware errors detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_ue_count, blk_name(blk)); + } + + if (ecc_count.new_de_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new %s detected in %s block\n", + seq_no, skt, aid, ecc_count.new_de_count, + (blk == RAS_BLOCK_ID__UMC) ? 
+ "deferred hardware errors" : "poison consumption", + blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u %s detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_de_count, + (blk == RAS_BLOCK_ID__UMC) ? + "deferred hardware errors" : "poison consumption", + blk_name(blk)); + } + + if (ecc_count.new_ce_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new correctable hardware errors detected in %s block\n", + seq_no, skt, aid, ecc_count.new_ce_count, blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u correctable hardware errors detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_ce_count, blk_name(blk)); + } +} + +static void aca_bank_log(struct ras_core_context *ras_core, + int idx, int total, struct aca_bank_reg *bank, + struct aca_bank_ecc *bank_ecc) +{ + int i; + + RAS_DEV_INFO(ras_core->dev, + "{%llu}" RAS_HW_ERR "Accelerator Check Architecture events logged\n", + bank->seq_no); + /* plus 1 for output format, e.g: ACA[08/08]: xxxx */ + for (i = 0; i < ARRAY_SIZE(aca_regs); i++) + RAS_DEV_INFO(ras_core->dev, + "{%llu}" RAS_HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", + bank->seq_no, idx + 1, total, + aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); +} + +static void aca_log_bank_data(struct ras_core_context *ras_core, + struct aca_bank_reg *bank, struct aca_bank_ecc *bank_ecc, + struct ras_log_batch_tag *batch) +{ + if (bank_ecc->ue_count) + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_UE, bank->regs, batch); + else if (bank_ecc->de_count) + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_DE, bank->regs, batch); + else + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_CE, bank->regs, batch); +} + +static int aca_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + return ras_mp1_get_bank_count(ras_core, type, count); +} + +static bool aca_match_bank(struct aca_block *aca_blk, struct aca_bank_reg *bank) +{ + const struct aca_bank_hw_ops *bank_ops; + + if (!aca_blk->blk_info) + return false; + + bank_ops = &aca_blk->blk_info->bank_ops; + if (!bank_ops->bank_match) + return false; + + return bank_ops->bank_match(aca_blk, bank); +} + +static int aca_parse_bank(struct ras_core_context *ras_core, + struct aca_block *aca_blk, + struct aca_bank_reg *bank, + struct aca_bank_ecc *ecc) +{ + const struct aca_bank_hw_ops *bank_ops = &aca_blk->blk_info->bank_ops; + + if (!bank_ops || !bank_ops->bank_parse) + return -RAS_CORE_NOT_SUPPORTED; + + return bank_ops->bank_parse(ras_core, aca_blk, bank, ecc); +} + +static int aca_check_block_ecc_info(struct ras_core_context *ras_core, + struct aca_block *aca_blk, struct aca_ecc_info *info) +{ + if (info->socket_id >= aca_blk->ecc.socket_num_per_hive) { + RAS_DEV_ERR(ras_core->dev, + "Socket id (%d) is out of config! max:%u\n", + info->socket_id, aca_blk->ecc.socket_num_per_hive); + return -ENODATA; + } + + if (info->die_id >= aca_blk->ecc.socket[info->socket_id].aid_num) { + RAS_DEV_ERR(ras_core->dev, + "Die id (%d) is out of config! max:%u\n", + info->die_id, aca_blk->ecc.socket[info->socket_id].aid_num); + return -ENODATA; + } + + if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) && + (info->xcd_id >= + aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num)) { + RAS_DEV_ERR(ras_core->dev, + "Xcd id (%d) is out of config! 
max:%u\n", + info->xcd_id, + aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num); + return -ENODATA; + } + + return 0; +} + +static int aca_log_bad_bank(struct ras_core_context *ras_core, + struct aca_block *aca_blk, struct aca_bank_reg *bank, + struct aca_bank_ecc *bank_ecc) +{ + struct aca_ecc_info *info; + struct aca_ecc_count *ecc_err; + struct aca_aid_ecc *aid_ecc; + int ret; + + info = &bank_ecc->bank_info; + + ret = aca_check_block_ecc_info(ras_core, aca_blk, info); + if (ret) + return ret; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aid_ecc = &aca_blk->ecc.socket[info->socket_id].aid[info->die_id]; + if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) + ecc_err = &aid_ecc->xcd.xcd[info->xcd_id].ecc_err; + else + ecc_err = &aid_ecc->ecc_err; + + ecc_err->new_ce_count += bank_ecc->ce_count; + ecc_err->total_ce_count += bank_ecc->ce_count; + ecc_err->new_ue_count += bank_ecc->ue_count; + ecc_err->total_ue_count += bank_ecc->ue_count; + ecc_err->new_de_count += bank_ecc->de_count; + ecc_err->total_de_count += bank_ecc->de_count; + mutex_unlock(&ras_core->ras_aca.aca_lock); + + if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) && + bank_ecc->de_count) { + struct ras_bank_ecc ras_ecc = {0}; + + ras_ecc.nps = ras_core_get_curr_nps_mode(ras_core); + ras_ecc.addr = bank_ecc->bank_info.addr; + ras_ecc.ipid = bank_ecc->bank_info.ipid; + ras_ecc.status = bank_ecc->bank_info.status; + ras_ecc.seq_no = bank->seq_no; + + if (ras_core_gpu_in_reset(ras_core)) + ras_umc_log_bad_bank_pending(ras_core, &ras_ecc); + else + ras_umc_log_bad_bank(ras_core, &ras_ecc); + } + + aca_report_ecc_info(ras_core, + bank->seq_no, aca_blk->blk_info->ras_block_id, info->socket_id, info->die_id, + &aca_blk->ecc.socket[info->socket_id].aid[info->die_id], bank_ecc); + + return 0; +} + +static struct aca_block *aca_get_bank_aca_block(struct ras_core_context *ras_core, + struct aca_bank_reg *bank) +{ + int i = 0; + + for (i = 0; i < RAS_BLOCK_ID__LAST; i++) + if (aca_match_bank(&ras_core->ras_aca.aca_blk[i], bank)) + return &ras_core->ras_aca.aca_blk[i]; + + return NULL; +} + +static int aca_dump_bank(struct ras_core_context *ras_core, u32 ecc_type, + int idx, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + int i, ret, reg_cnt; + + reg_cnt = min_t(int, 16, ARRAY_SIZE(bank->regs)); + for (i = 0; i < reg_cnt; i++) { + ret = ras_mp1_dump_bank(ras_core, ecc_type, idx, i, &bank->regs[i]); + if (ret) + return ret; + } + + return 0; +} + +static uint64_t aca_get_bank_seqno(struct ras_core_context *ras_core, + enum ras_err_type err_type, struct aca_block *aca_blk, + struct aca_bank_ecc *bank_ecc) +{ + uint64_t seq_no = 0; + + if (bank_ecc->de_count) { + if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_DE, true); + else + seq_no = ras_core_get_seqno(ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, true); + } else if (bank_ecc->ue_count) { + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_UE, true); + } else { + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_CE, true); + } + + return seq_no; +} + +static bool aca_dup_update_ue_in_fatal(struct ras_core_context *ras_core, + u32 ecc_type) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (ecc_type != RAS_ERR_TYPE__UE) + return false; + + if (aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) { + if (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG) + return true; + + aca->ue_updated_mark |= ACA_MARK_UE_READ_FLAG; + } + + return false; +} + +void 
ras_aca_mark_fatal_flag(struct ras_core_context *ras_core) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (!aca) + return; + + aca->ue_updated_mark |= ACA_MARK_FATAL_FLAG; +} + +void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (!aca) + return; + + if ((aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) && + (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG)) + aca->ue_updated_mark = 0; +} + +static int aca_banks_update(struct ras_core_context *ras_core, + u32 ecc_type, void *data) +{ + struct aca_bank_reg bank; + struct aca_block *aca_blk; + struct aca_bank_ecc bank_ecc; + struct ras_log_batch_tag *batch_tag = NULL; + u32 count = 0; + int ret = 0; + int i; + + mutex_lock(&ras_core->ras_aca.bank_op_lock); + + if (aca_dup_update_ue_in_fatal(ras_core, ecc_type)) + goto out; + + ret = aca_get_bank_count(ras_core, ecc_type, &count); + if (ret) + goto out; + + if (!count) + goto out; + + batch_tag = ras_log_ring_create_batch_tag(ras_core); + for (i = 0; i < count; i++) { + memset(&bank, 0, sizeof(bank)); + ret = aca_dump_bank(ras_core, ecc_type, i, &bank); + if (ret) + break; + + bank.ecc_type = ecc_type; + + memset(&bank_ecc, 0, sizeof(bank_ecc)); + aca_blk = aca_get_bank_aca_block(ras_core, &bank); + if (aca_blk) + ret = aca_parse_bank(ras_core, aca_blk, &bank, &bank_ecc); + + bank.seq_no = aca_get_bank_seqno(ras_core, ecc_type, aca_blk, &bank_ecc); + + aca_log_bank_data(ras_core, &bank, &bank_ecc, batch_tag); + aca_bank_log(ras_core, i, count, &bank, &bank_ecc); + + if (!ret && aca_blk) + ret = aca_log_bad_bank(ras_core, aca_blk, &bank, &bank_ecc); + + if (ret) + break; + } + ras_log_ring_destroy_batch_tag(ras_core, batch_tag); + +out: + mutex_unlock(&ras_core->ras_aca.bank_op_lock); + return ret; +} + +int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 type, void *data) +{ + /* Update aca bank to aca source error_cache first */ + return aca_banks_update(ras_core, type, data); +} + +static struct aca_block *ras_aca_get_block_handle(struct ras_core_context *ras_core, uint32_t blk) +{ + return &ras_core->ras_aca.aca_blk[blk]; +} + +static int ras_aca_clear_block_ecc_count(struct ras_core_context *ras_core, u32 blk) +{ + struct aca_block *aca_blk; + struct aca_aid_ecc *aid_ecc; + int skt, aid, xcd; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aca_blk = ras_aca_get_block_handle(ras_core, blk); + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + aid_ecc = &aca_blk->ecc.socket[skt].aid[aid]; + if (blk == RAS_BLOCK_ID__GFX) { + for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) + memset(&aid_ecc->xcd.xcd[xcd], + 0, sizeof(struct aca_xcd_ecc)); + } else { + memset(&aid_ecc->ecc_err, 0, sizeof(aid_ecc->ecc_err)); + } + } + } + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core) +{ + enum ras_block_id blk; + int ret; + + for (blk = RAS_BLOCK_ID__UMC; blk < RAS_BLOCK_ID__LAST; blk++) { + ret = ras_aca_clear_block_ecc_count(ras_core, blk); + if (ret) + break; + } + + return ret; +} + +int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk) +{ + struct aca_block *aca_blk; + int skt, aid, xcd; + struct aca_ecc_count *ecc_err; + struct aca_aid_ecc *aid_ecc; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aca_blk = ras_aca_get_block_handle(ras_core, blk); + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < 
aca_blk->ecc.socket[skt].aid_num; aid++) { + aid_ecc = &aca_blk->ecc.socket[skt].aid[aid]; + if (blk == RAS_BLOCK_ID__GFX) { + for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) { + ecc_err = &aid_ecc->xcd.xcd[xcd].ecc_err; + ecc_err->new_ce_count = 0; + ecc_err->new_ue_count = 0; + ecc_err->new_de_count = 0; + } + } else { + ecc_err = &aid_ecc->ecc_err; + ecc_err->new_ce_count = 0; + ecc_err->new_ue_count = 0; + ecc_err->new_de_count = 0; + } + } + } + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +static int ras_aca_get_block_each_aid_ecc_count(struct ras_core_context *ras_core, + u32 blk, u32 skt, u32 aid, u32 xcd, + struct aca_ecc_count *ecc_count) +{ + struct aca_block *aca_blk; + struct aca_ecc_count *ecc_err; + + aca_blk = ras_aca_get_block_handle(ras_core, blk); + if (blk == RAS_BLOCK_ID__GFX) + ecc_err = &aca_blk->ecc.socket[skt].aid[aid].xcd.xcd[xcd].ecc_err; + else + ecc_err = &aca_blk->ecc.socket[skt].aid[aid].ecc_err; + + ecc_count->new_ce_count = ecc_err->new_ce_count; + ecc_count->total_ce_count = ecc_err->total_ce_count; + ecc_count->new_ue_count = ecc_err->new_ue_count; + ecc_count->total_ue_count = ecc_err->total_ue_count; + ecc_count->new_de_count = ecc_err->new_de_count; + ecc_count->total_de_count = ecc_err->total_de_count; + + return 0; +} + +static inline void _add_ecc_count(struct aca_ecc_count *des, struct aca_ecc_count *src) +{ + des->new_ce_count += src->new_ce_count; + des->total_ce_count += src->total_ce_count; + des->new_ue_count += src->new_ue_count; + des->total_ue_count += src->total_ue_count; + des->new_de_count += src->new_de_count; + des->total_de_count += src->total_de_count; +} + +static const struct ras_aca_ip_func *aca_get_ip_func( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(1, 0, 0): + return &ras_aca_func_v1_0; + default: + RAS_DEV_ERR(ras_core->dev, + "ACA ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, + u32 blk, void *data) +{ + struct ras_ecc_count *err_data = (struct ras_ecc_count *)data; + struct aca_block *aca_blk; + int skt, aid, xcd; + struct aca_ecc_count ecc_xcd; + struct aca_ecc_count ecc_aid; + struct aca_ecc_count ecc; + + if (blk >= RAS_BLOCK_ID__LAST) + return -EINVAL; + + if (!err_data) + return -EINVAL; + + aca_blk = ras_aca_get_block_handle(ras_core, blk); + memset(&ecc, 0, sizeof(ecc)); + + mutex_lock(&ras_core->ras_aca.aca_lock); + if (blk == RAS_BLOCK_ID__GFX) { + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + memset(&ecc_aid, 0, sizeof(ecc_aid)); + for (xcd = 0; + xcd < aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num; + xcd++) { + memset(&ecc_xcd, 0, sizeof(ecc_xcd)); + if (ras_aca_get_block_each_aid_ecc_count(ras_core, + blk, skt, aid, xcd, &ecc_xcd)) + continue; + _add_ecc_count(&ecc_aid, &ecc_xcd); + } + _add_ecc_count(&ecc, &ecc_aid); + } + } + } else { + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + memset(&ecc_aid, 0, sizeof(ecc_aid)); + if (ras_aca_get_block_each_aid_ecc_count(ras_core, + blk, skt, aid, 0, &ecc_aid)) + continue; + _add_ecc_count(&ecc, &ecc_aid); + } + } + } + + err_data->new_ce_count = ecc.new_ce_count; + err_data->total_ce_count = ecc.total_ce_count; + err_data->new_ue_count = ecc.new_ue_count; + err_data->total_ue_count = ecc.total_ue_count; + 
err_data->new_de_count = ecc.new_de_count; + err_data->total_de_count = ecc.total_de_count; + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +int ras_aca_sw_init(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + struct ras_aca_config *aca_cfg = &ras_core->config->aca_cfg; + struct aca_block *aca_blk; + uint32_t socket_num_per_hive; + uint32_t aid_num_per_socket; + uint32_t xcd_num_per_aid; + int blk, skt, aid; + + socket_num_per_hive = aca_cfg->socket_num_per_hive; + aid_num_per_socket = aca_cfg->aid_num_per_socket; + xcd_num_per_aid = aca_cfg->xcd_num_per_aid; + + if (!xcd_num_per_aid || !aid_num_per_socket || + (socket_num_per_hive > MAX_SOCKET_NUM_PER_HIVE) || + (aid_num_per_socket > MAX_AID_NUM_PER_SOCKET) || + (xcd_num_per_aid > MAX_XCD_NUM_PER_AID)) { + RAS_DEV_ERR(ras_core->dev, "Invalid ACA system configuration: %d, %d, %d\n", + socket_num_per_hive, aid_num_per_socket, xcd_num_per_aid); + return -EINVAL; + } + + memset(ras_aca, 0, sizeof(*ras_aca)); + + for (blk = 0; blk < RAS_BLOCK_ID__LAST; blk++) { + aca_blk = &ras_aca->aca_blk[blk]; + aca_blk->ecc.socket_num_per_hive = socket_num_per_hive; + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + aca_blk->ecc.socket[skt].aid_num = aid_num_per_socket; + if (blk == RAS_BLOCK_ID__GFX) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) + aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num = + xcd_num_per_aid; + } + } + } + + mutex_init(&ras_aca->aca_lock); + mutex_init(&ras_aca->bank_op_lock); + + return 0; +} + +int ras_aca_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + + mutex_destroy(&ras_aca->aca_lock); + mutex_destroy(&ras_aca->bank_op_lock); + + return 0; +} + +int ras_aca_hw_init(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + struct aca_block *aca_blk; + const struct ras_aca_ip_func *ip_func; + int i; + + ras_aca->aca_ip_version = ras_core->config->aca_ip_version; + ip_func = aca_get_ip_func(ras_core, ras_aca->aca_ip_version); + if (!ip_func) + return -EINVAL; + + for (i = 0; i < ip_func->block_num; i++) { + aca_blk = &ras_aca->aca_blk[ip_func->block_info[i]->ras_block_id]; + aca_blk->blk_info = ip_func->block_info[i]; + } + + ras_aca->ue_updated_mark = 0; + + return 0; +} + +int ras_aca_hw_fini(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + + ras_aca->ue_updated_mark = 0; + + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h new file mode 100644 index 000000000000..f61b02a5f0fc --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_ACA_H__ +#define __RAS_ACA_H__ +#include "ras.h" + +#define MAX_SOCKET_NUM_PER_HIVE 8 +#define MAX_AID_NUM_PER_SOCKET 4 +#define MAX_XCD_NUM_PER_AID 2 +#define MAX_ACA_RAS_BLOCK 20 + +#define ACA_ERROR__UE_MASK (0x1 << RAS_ERR_TYPE__UE) +#define ACA_ERROR__CE_MASK (0x1 << RAS_ERR_TYPE__CE) +#define ACA_ERROR__DE_MASK (0x1 << RAS_ERR_TYPE__DE) + +enum ras_aca_reg_idx { + ACA_REG_IDX__CTL = 0, + ACA_REG_IDX__STATUS = 1, + ACA_REG_IDX__ADDR = 2, + ACA_REG_IDX__MISC0 = 3, + ACA_REG_IDX__CONFG = 4, + ACA_REG_IDX__IPID = 5, + ACA_REG_IDX__SYND = 6, + ACA_REG_IDX__DESTAT = 8, + ACA_REG_IDX__DEADDR = 9, + ACA_REG_IDX__CTL_MASK = 10, + ACA_REG_MAX_COUNT = 16, +}; + +struct ras_core_context; +struct aca_block; + +struct aca_bank_reg { + u32 ecc_type; + u64 seq_no; + u64 regs[ACA_REG_MAX_COUNT]; +}; + +enum aca_ecc_hwip { + ACA_ECC_HWIP__UNKNOWN = -1, + ACA_ECC_HWIP__PSP = 0, + ACA_ECC_HWIP__UMC, + ACA_ECC_HWIP__SMU, + ACA_ECC_HWIP__PCS_XGMI, + ACA_ECC_HWIP_COUNT, +}; + +struct aca_ecc_info { + int die_id; + int socket_id; + int xcd_id; + int hwid; + int mcatype; + uint64_t status; + uint64_t ipid; + uint64_t addr; +}; + +struct aca_bank_ecc { + struct aca_ecc_info bank_info; + u32 ce_count; + u32 ue_count; + u32 de_count; +}; + +struct aca_ecc_count { + u32 new_ce_count; + u32 total_ce_count; + u32 new_ue_count; + u32 total_ue_count; + u32 new_de_count; + u32 total_de_count; +}; + +struct aca_xcd_ecc { + struct aca_ecc_count ecc_err; +}; + +struct aca_aid_ecc { + union { + struct aca_xcd { + struct aca_xcd_ecc xcd[MAX_XCD_NUM_PER_AID]; + u32 xcd_num; + } xcd; + struct aca_ecc_count ecc_err; + }; +}; + +struct aca_socket_ecc { + struct aca_aid_ecc aid[MAX_AID_NUM_PER_SOCKET]; + u32 aid_num; +}; + +struct aca_block_ecc { + struct aca_socket_ecc socket[MAX_SOCKET_NUM_PER_HIVE]; + u32 socket_num_per_hive; +}; + +struct aca_bank_hw_ops { + bool (*bank_match)(struct aca_block *ras_blk, void *data); + int (*bank_parse)(struct ras_core_context *ras_core, + struct aca_block *aca_blk, void *data, void *buf); +}; + +struct aca_block_info { + char name[32]; + u32 ras_block_id; + enum aca_ecc_hwip hwip; + struct aca_bank_hw_ops bank_ops; + u32 mask; +}; + +struct aca_block { + const struct aca_block_info *blk_info; + struct aca_block_ecc ecc; +}; + +struct ras_aca_ip_func { + uint32_t block_num; + const struct aca_block_info **block_info; +}; + +struct ras_aca { + uint32_t aca_ip_version; + const struct ras_aca_ip_func *ip_func; + struct mutex aca_lock; + struct mutex bank_op_lock; + struct aca_block aca_blk[MAX_ACA_RAS_BLOCK]; + uint32_t ue_updated_mark; +}; + +int ras_aca_sw_init(struct ras_core_context *ras_core); +int ras_aca_sw_fini(struct ras_core_context *ras_core); +int ras_aca_hw_init(struct ras_core_context *ras_core); +int ras_aca_hw_fini(struct ras_core_context *ras_core); +int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, u32 blk, void *data); +int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk); +int 
ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core); +int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 ecc_type, void *data); +void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core); +void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c new file mode 100644 index 000000000000..29df98948703 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_aca.h" +#include "ras_core_status.h" +#include "ras_aca_v1_0.h" + +struct ras_aca_hwip { + int hwid; + int mcatype; +}; + +static struct ras_aca_hwip aca_hwid_mcatypes[ACA_ECC_HWIP_COUNT] = { + [ACA_ECC_HWIP__SMU] = {0x01, 0x01}, + [ACA_ECC_HWIP__PCS_XGMI] = {0x50, 0x00}, + [ACA_ECC_HWIP__UMC] = {0x96, 0x00}, +}; + +static int aca_decode_bank_info(struct aca_block *aca_blk, + struct aca_bank_reg *bank, struct aca_ecc_info *info) +{ + u64 ipid; + u32 instidhi, instidlo; + + ipid = bank->regs[ACA_REG_IDX__IPID]; + info->hwid = ACA_REG_IPID_HARDWAREID(ipid); + info->mcatype = ACA_REG_IPID_MCATYPE(ipid); + /* + * Unified DieID Format: SAASS. A:AID, S:Socket. + * Unified DieID[4:4] = InstanceId[0:0] + * Unified DieID[0:3] = InstanceIdHi[0:3] + */ + instidhi = ACA_REG_IPID_INSTANCEIDHI(ipid); + instidlo = ACA_REG_IPID_INSTANCEIDLO(ipid); + info->die_id = ((instidhi >> 2) & 0x03); + info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03); + + if ((aca_blk->blk_info->hwip == ACA_ECC_HWIP__SMU) && + (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX)) + info->xcd_id = + ((instidlo & GENMASK_ULL(31, 1)) == mmSMNAID_XCD0_MCA_SMU) ? 
0 : 1; + + return 0; +} + +static bool aca_check_bank_hwip(struct aca_bank_reg *bank, enum aca_ecc_hwip type) +{ + struct ras_aca_hwip *hwip; + int hwid, mcatype; + u64 ipid; + + if (!bank || (type == ACA_ECC_HWIP__UNKNOWN)) + return false; + + hwip = &aca_hwid_mcatypes[type]; + if (!hwip->hwid) + return false; + + ipid = bank->regs[ACA_REG_IDX__IPID]; + hwid = ACA_REG_IPID_HARDWAREID(ipid); + mcatype = ACA_REG_IPID_MCATYPE(ipid); + + return hwip->hwid == hwid && hwip->mcatype == mcatype; +} + +static bool aca_match_bank_default(struct aca_block *aca_blk, void *data) +{ + return aca_check_bank_hwip((struct aca_bank_reg *)data, aca_blk->blk_info->hwip); +} + +static bool aca_match_gfx_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + u32 instlo; + + if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + switch (instlo) { + case mmSMNAID_XCD0_MCA_SMU: + case mmSMNAID_XCD1_MCA_SMU: + case mmSMNXCD_XCD0_MCA_SMU: + return true; + default: + break; + } + + return false; +} + +static bool aca_match_sdma_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + /* CODE_SDMA0 - CODE_SDMA4, reference to smu driver if header file */ + static int sdma_err_codes[] = { 33, 34, 35, 36 }; + u32 instlo; + int errcode, i; + + if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]); + errcode &= 0xff; + + /* Check SDMA error codes */ + for (i = 0; i < ARRAY_SIZE(sdma_err_codes); i++) { + if (errcode == sdma_err_codes[i]) + return true; + } + + return false; +} + +static bool aca_match_mmhub_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + /* reference to smu driver if header file */ + const int mmhub_err_codes[] = { + 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */ + 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */ + 10, /* CODE_UTCL2_ROUTER */ + 11, /* CODE_VML2 */ + 12, /* CODE_VML2_WALKER */ + 13, /* CODE_MMCANE */ + }; + u32 instlo; + int errcode, i; + + if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]); + errcode &= 0xff; + + /* Check MMHUB error codes */ + for (i = 0; i < ARRAY_SIZE(mmhub_err_codes); i++) { + if (errcode == mmhub_err_codes[i]) + return true; + } + + return false; +} + +static bool aca_check_umc_de(struct ras_core_context *ras_core, uint64_t mc_umc_status) +{ + return (ras_core->poison_supported && + ACA_REG_STATUS_VAL(mc_umc_status) && + ACA_REG_STATUS_DEFERRED(mc_umc_status)); +} + +static bool aca_check_umc_ue(struct ras_core_context *ras_core, uint64_t mc_umc_status) +{ + if (aca_check_umc_de(ras_core, mc_umc_status)) + return false; + + return (ACA_REG_STATUS_VAL(mc_umc_status) && + (ACA_REG_STATUS_PCC(mc_umc_status) || + ACA_REG_STATUS_UC(mc_umc_status) || + ACA_REG_STATUS_TCC(mc_umc_status))); +} + +static bool aca_check_umc_ce(struct ras_core_context *ras_core, uint64_t mc_umc_status) +{ + if (aca_check_umc_de(ras_core, 
mc_umc_status)) + return false; + + return (ACA_REG_STATUS_VAL(mc_umc_status) && + (ACA_REG_STATUS_CECC(mc_umc_status) || + (ACA_REG_STATUS_UECC(mc_umc_status) && + ACA_REG_STATUS_UC(mc_umc_status) == 0) || + /* Identify data parity error in replay mode */ + ((ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0x5 || + ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0xb) && + !(aca_check_umc_ue(ras_core, mc_umc_status))))); +} + +static int aca_parse_umc_bank(struct ras_core_context *ras_core, + struct aca_block *ras_blk, void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + uint32_t ext_error_code; + uint64_t status0; + + status0 = bank->regs[ACA_REG_IDX__STATUS]; + if (!ACA_REG_STATUS_VAL(status0)) + return 0; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS]; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status0); + + if (aca_check_umc_de(ras_core, status0)) + ecc->de_count = 1; + else if (aca_check_umc_ue(ras_core, status0)) + ecc->ue_count = ext_error_code ? + 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + else if (aca_check_umc_ce(ras_core, status0)) + ecc->ce_count = ext_error_code ? + 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + + return 0; +} + +static bool aca_check_bank_is_de(struct ras_core_context *ras_core, + uint64_t status) +{ + return (ACA_REG_STATUS_POISON(status) || + ACA_REG_STATUS_DEFERRED(status)); +} + +static int aca_parse_bank_default(struct ras_core_context *ras_core, + struct aca_block *ras_blk, + void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + u64 misc0 = bank->regs[ACA_REG_IDX__MISC0]; + u64 status = bank->regs[ACA_REG_IDX__STATUS]; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = status; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + if (aca_check_bank_is_de(ras_core, status)) { + ecc->de_count = 1; + } else { + if (bank->ecc_type == RAS_ERR_TYPE__UE) + ecc->ue_count = 1; + else if (bank->ecc_type == RAS_ERR_TYPE__CE) + ecc->ce_count = ACA_REG_MISC0_ERRCNT(misc0); + } + + return 0; +} + +static int aca_parse_xgmi_bank(struct ras_core_context *ras_core, + struct aca_block *ras_blk, + void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + u64 status, count; + int ext_error_code; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS]; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + status = bank->regs[ACA_REG_IDX__STATUS]; + ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status); + + count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + if (bank->ecc_type == RAS_ERR_TYPE__UE) { + if 
(ext_error_code != 0 && ext_error_code != 9) + count = 0ULL; + ecc->ue_count = count; + } else if (bank->ecc_type == RAS_ERR_TYPE__CE) { + count = ext_error_code == 6 ? count : 0ULL; + ecc->ce_count = count; + } + + return 0; +} + +static const struct aca_block_info aca_v1_0_umc = { + .name = "umc", + .ras_block_id = RAS_BLOCK_ID__UMC, + .hwip = ACA_ECC_HWIP__UMC, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK | ACA_ERROR__DE_MASK, + .bank_ops = { + .bank_match = aca_match_bank_default, + .bank_parse = aca_parse_umc_bank, + }, +}; + +static const struct aca_block_info aca_v1_0_gfx = { + .name = "gfx", + .ras_block_id = RAS_BLOCK_ID__GFX, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK, + .bank_ops = { + .bank_match = aca_match_gfx_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_sdma = { + .name = "sdma", + .ras_block_id = RAS_BLOCK_ID__SDMA, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK, + .bank_ops = { + .bank_match = aca_match_sdma_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_mmhub = { + .name = "mmhub", + .ras_block_id = RAS_BLOCK_ID__MMHUB, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK, + .bank_ops = { + .bank_match = aca_match_mmhub_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_xgmi = { + .name = "xgmi", + .ras_block_id = RAS_BLOCK_ID__XGMI_WAFL, + .hwip = ACA_ECC_HWIP__PCS_XGMI, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK, + .bank_ops = { + .bank_match = aca_match_bank_default, + .bank_parse = aca_parse_xgmi_bank, + }, +}; + +static const struct aca_block_info *aca_block_info_v1_0[] = { + &aca_v1_0_umc, + &aca_v1_0_gfx, + &aca_v1_0_sdma, + &aca_v1_0_mmhub, + &aca_v1_0_xgmi, +}; + +const struct ras_aca_ip_func ras_aca_func_v1_0 = { + .block_num = ARRAY_SIZE(aca_block_info_v1_0), + .block_info = aca_block_info_v1_0, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h new file mode 100644 index 000000000000..40e5d94b037f --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_ACA_V1_0_H__ +#define __RAS_ACA_V1_0_H__ +#include "ras.h" + +#define ACA__REG__FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l) +#define ACA_REG_STATUS_VAL(x) ACA__REG__FIELD(x, 63, 63) +#define ACA_REG_STATUS_OVERFLOW(x) ACA__REG__FIELD(x, 62, 62) +#define ACA_REG_STATUS_UC(x) ACA__REG__FIELD(x, 61, 61) +#define ACA_REG_STATUS_EN(x) ACA__REG__FIELD(x, 60, 60) +#define ACA_REG_STATUS_MISCV(x) ACA__REG__FIELD(x, 59, 59) +#define ACA_REG_STATUS_ADDRV(x) ACA__REG__FIELD(x, 58, 58) +#define ACA_REG_STATUS_PCC(x) ACA__REG__FIELD(x, 57, 57) +#define ACA_REG_STATUS_ERRCOREIDVAL(x) ACA__REG__FIELD(x, 56, 56) +#define ACA_REG_STATUS_TCC(x) ACA__REG__FIELD(x, 55, 55) +#define ACA_REG_STATUS_SYNDV(x) ACA__REG__FIELD(x, 53, 53) +#define ACA_REG_STATUS_CECC(x) ACA__REG__FIELD(x, 46, 46) +#define ACA_REG_STATUS_UECC(x) ACA__REG__FIELD(x, 45, 45) +#define ACA_REG_STATUS_DEFERRED(x) ACA__REG__FIELD(x, 44, 44) +#define ACA_REG_STATUS_POISON(x) ACA__REG__FIELD(x, 43, 43) +#define ACA_REG_STATUS_SCRUB(x) ACA__REG__FIELD(x, 40, 40) +#define ACA_REG_STATUS_ERRCOREID(x) ACA__REG__FIELD(x, 37, 32) +#define ACA_REG_STATUS_ADDRLSB(x) ACA__REG__FIELD(x, 29, 24) +#define ACA_REG_STATUS_ERRORCODEEXT(x) ACA__REG__FIELD(x, 21, 16) +#define ACA_REG_STATUS_ERRORCODE(x) ACA__REG__FIELD(x, 15, 0) + +#define ACA_REG_IPID_MCATYPE(x) ACA__REG__FIELD(x, 63, 48) +#define ACA_REG_IPID_INSTANCEIDHI(x) ACA__REG__FIELD(x, 47, 44) +#define ACA_REG_IPID_HARDWAREID(x) ACA__REG__FIELD(x, 43, 32) +#define ACA_REG_IPID_INSTANCEIDLO(x) ACA__REG__FIELD(x, 31, 0) + +#define ACA_REG_MISC0_VALID(x) ACA__REG__FIELD(x, 63, 63) +#define ACA_REG_MISC0_OVRFLW(x) ACA__REG__FIELD(x, 48, 48) +#define ACA_REG_MISC0_ERRCNT(x) ACA__REG__FIELD(x, 43, 32) + +#define ACA_REG_SYND_ERRORINFORMATION(x) ACA__REG__FIELD(x, 17, 0) + +/* NOTE: The following codes refers to the smu header file */ +#define ACA_EXTERROR_CODE_CE 0x3a +#define ACA_EXTERROR_CODE_FAULT 0x3b + +#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */ +#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */ +#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */ +#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */ + +extern const struct ras_aca_ip_func ras_aca_func_v1_0; +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c new file mode 100644 index 000000000000..94e6d7420d94 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_cmd.h" + +#define RAS_CMD_MAJOR_VERSION 6 +#define RAS_CMD_MINOR_VERSION 0 +#define RAS_CMD_VERSION (((RAS_CMD_MAJOR_VERSION) << 10) | (RAS_CMD_MINOR_VERSION)) + +static int ras_cmd_add_device(struct ras_core_context *ras_core) +{ + INIT_LIST_HEAD(&ras_core->ras_cmd.head); + ras_core->ras_cmd.ras_core = ras_core; + ras_core->ras_cmd.dev_handle = (uintptr_t)ras_core ^ RAS_CMD_DEV_HANDLE_MAGIC; + return 0; +} + +static int ras_cmd_remove_device(struct ras_core_context *ras_core) +{ + memset(&ras_core->ras_cmd, 0, sizeof(ras_core->ras_cmd)); + return 0; +} + +static int ras_get_block_ecc_info(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_block_ecc_info_req *input_data = + (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw; + struct ras_cmd_block_ecc_info_rsp *output_data = + (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw; + struct ras_ecc_count err_data; + int ret; + + if (cmd->input_size != sizeof(struct ras_cmd_block_ecc_info_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + memset(&err_data, 0, sizeof(err_data)); + ret = ras_aca_get_block_ecc_count(ras_core, input_data->block_id, &err_data); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + output_data->ce_count = err_data.total_ce_count; + output_data->ue_count = err_data.total_ue_count; + output_data->de_count = err_data.total_de_count; + + cmd->output_size = sizeof(struct ras_cmd_block_ecc_info_rsp); + return RAS_CMD__SUCCESS; +} + +static void ras_cmd_update_bad_page_info(struct ras_cmd_bad_page_record *ras_cmd_record, + struct eeprom_umc_record *record) +{ + ras_cmd_record->retired_page = record->cur_nps_retired_row_pfn; + ras_cmd_record->ts = record->ts; + ras_cmd_record->err_type = record->err_type; + ras_cmd_record->mem_channel = record->mem_channel; + ras_cmd_record->mcumc_id = record->mcumc_id; + ras_cmd_record->address = record->address; + ras_cmd_record->bank = record->bank; + ras_cmd_record->valid = 1; +} + +static int ras_cmd_get_group_bad_pages(struct ras_core_context *ras_core, + uint32_t group_index, struct ras_cmd_bad_pages_info_rsp *output_data) +{ + struct eeprom_umc_record record; + struct ras_cmd_bad_page_record *ras_cmd_record; + uint32_t i = 0, bp_cnt = 0, group_cnt = 0; + + output_data->bp_in_group = 0; + output_data->group_index = 0; + + bp_cnt = ras_umc_get_badpage_count(ras_core); + if (bp_cnt) { + output_data->group_index = group_index; + group_cnt = bp_cnt / RAS_CMD_MAX_BAD_PAGES_PER_GROUP + + ((bp_cnt % RAS_CMD_MAX_BAD_PAGES_PER_GROUP) ? 
1 : 0); + + if (group_index >= group_cnt) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + i = group_index * RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + for (; + i < bp_cnt && output_data->bp_in_group < RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + i++) { + if (ras_umc_get_badpage_record(ras_core, i, &record)) + return RAS_CMD__ERROR_GENERIC; + + ras_cmd_record = &output_data->records[i % RAS_CMD_MAX_BAD_PAGES_PER_GROUP]; + + memset(ras_cmd_record, 0, sizeof(*ras_cmd_record)); + ras_cmd_update_bad_page_info(ras_cmd_record, &record); + output_data->bp_in_group++; + } + } + output_data->bp_total_cnt = bp_cnt; + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_bad_pages(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_bad_pages_info_req *input_data = + (struct ras_cmd_bad_pages_info_req *)cmd->input_buff_raw; + struct ras_cmd_bad_pages_info_rsp *output_data = + (struct ras_cmd_bad_pages_info_rsp *)cmd->output_buff_raw; + int ret; + + if (cmd->input_size != sizeof(struct ras_cmd_bad_pages_info_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ret = ras_cmd_get_group_bad_pages(ras_core, input_data->group_index, output_data); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_bad_pages_info_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_clear_bad_page_info(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + if (cmd->input_size != sizeof(struct ras_cmd_dev_handle)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if (ras_eeprom_reset_table(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + if (ras_umc_clean_badpage_data(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_reset_all_error_counts(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + if (cmd->input_size != sizeof(struct ras_cmd_dev_handle)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if (ras_aca_clear_all_blocks_ecc_count(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + if (ras_umc_clear_logged_ecc(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_cper_snapshot(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_cper_snapshot_rsp *output_data = + (struct ras_cmd_cper_snapshot_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + + if (cmd->input_size != sizeof(struct ras_cmd_cper_snapshot_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ras_log_ring_get_batch_overview(ras_core, &overview); + + output_data->total_cper_num = overview.logged_batch_count; + output_data->start_cper_id = overview.first_batch_id; + output_data->latest_cper_id = overview.last_batch_id; + + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_cper_snapshot_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_cper_records(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_cper_record_req *req = + (struct ras_cmd_cper_record_req *)cmd->input_buff_raw; + struct ras_cmd_cper_record_rsp *rsp = + (struct ras_cmd_cper_record_rsp *)cmd->output_buff_raw; + struct ras_log_info *trace[MAX_RECORD_PER_BATCH] = {0}; + struct ras_log_batch_overview overview; + uint32_t offset = 0, real_data_len = 0; + uint64_t batch_id; + uint8_t *buffer; + int ret = 0, i, count; + + if (cmd->input_size != sizeof(struct ras_cmd_cper_record_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if 
(!req->buf_size || !req->buf_ptr || !req->cper_num) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + buffer = kzalloc(req->buf_size, GFP_KERNEL); + if (!buffer) + return RAS_CMD__ERROR_GENERIC; + + ras_log_ring_get_batch_overview(ras_core, &overview); + for (i = 0; i < req->cper_num; i++) { + batch_id = req->cper_start_id + i; + if (batch_id >= overview.last_batch_id) + break; + + count = ras_log_ring_get_batch_records(ras_core, batch_id, trace, + ARRAY_SIZE(trace)); + if (count > 0) { + ret = ras_cper_generate_cper(ras_core, trace, count, + &buffer[offset], req->buf_size - offset, &real_data_len); + if (ret) + break; + + offset += real_data_len; + } + } + + if ((ret && (ret != -ENOMEM)) || + copy_to_user(u64_to_user_ptr(req->buf_ptr), buffer, offset)) { + kfree(buffer); + return RAS_CMD__ERROR_GENERIC; + } + + rsp->real_data_size = offset; + rsp->real_cper_num = i; + rsp->remain_num = (ret == -ENOMEM) ? (req->cper_num - i) : 0; + rsp->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_cper_record_rsp); + + kfree(buffer); + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_batch_trace_snapshot(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_batch_trace_snapshot_rsp *rsp = + (struct ras_cmd_batch_trace_snapshot_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + + + if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_snapshot_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ras_log_ring_get_batch_overview(ras_core, &overview); + + rsp->total_batch_num = overview.logged_batch_count; + rsp->start_batch_id = overview.first_batch_id; + rsp->latest_batch_id = overview.last_batch_id; + rsp->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_batch_trace_snapshot_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_batch_trace_records(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_batch_trace_record_req *input_data = + (struct ras_cmd_batch_trace_record_req *)cmd->input_buff_raw; + struct ras_cmd_batch_trace_record_rsp *output_data = + (struct ras_cmd_batch_trace_record_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + struct ras_log_info *trace_arry[MAX_RECORD_PER_BATCH] = {0}; + struct ras_log_info *record; + int i, j, count = 0, offset = 0; + uint64_t id; + bool completed = false; + + if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_record_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if ((!input_data->batch_num) || (input_data->batch_num > RAS_CMD_MAX_BATCH_NUM)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + ras_log_ring_get_batch_overview(ras_core, &overview); + if ((input_data->start_batch_id < overview.first_batch_id) || + (input_data->start_batch_id >= overview.last_batch_id)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + for (i = 0; i < input_data->batch_num; i++) { + id = input_data->start_batch_id + i; + if (id >= overview.last_batch_id) { + completed = true; + break; + } + + count = ras_log_ring_get_batch_records(ras_core, + id, trace_arry, ARRAY_SIZE(trace_arry)); + if (count > 0) { + if ((offset + count) > RAS_CMD_MAX_TRACE_NUM) + break; + for (j = 0; j < count; j++) { + record = &output_data->records[offset + j]; + record->seqno = trace_arry[j]->seqno; + record->timestamp = trace_arry[j]->timestamp; + record->event = trace_arry[j]->event; + memcpy(&record->aca_reg, + &trace_arry[j]->aca_reg, sizeof(trace_arry[j]->aca_reg)); + } + } else { + count = 0; + } + + output_data->batchs[i].batch_id = 
id; + output_data->batchs[i].offset = offset; + output_data->batchs[i].trace_num = count; + offset += count; + } + + output_data->start_batch_id = input_data->start_batch_id; + output_data->real_batch_num = i; + output_data->remain_num = completed ? 0 : (input_data->batch_num - i); + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_batch_trace_record_rsp); + + return RAS_CMD__SUCCESS; +} + +static enum ras_ta_block __get_ras_ta_block(enum ras_block_id block) +{ + switch (block) { + case RAS_BLOCK_ID__UMC: + return RAS_TA_BLOCK__UMC; + case RAS_BLOCK_ID__SDMA: + return RAS_TA_BLOCK__SDMA; + case RAS_BLOCK_ID__GFX: + return RAS_TA_BLOCK__GFX; + case RAS_BLOCK_ID__MMHUB: + return RAS_TA_BLOCK__MMHUB; + case RAS_BLOCK_ID__ATHUB: + return RAS_TA_BLOCK__ATHUB; + case RAS_BLOCK_ID__PCIE_BIF: + return RAS_TA_BLOCK__PCIE_BIF; + case RAS_BLOCK_ID__HDP: + return RAS_TA_BLOCK__HDP; + case RAS_BLOCK_ID__XGMI_WAFL: + return RAS_TA_BLOCK__XGMI_WAFL; + case RAS_BLOCK_ID__DF: + return RAS_TA_BLOCK__DF; + case RAS_BLOCK_ID__SMN: + return RAS_TA_BLOCK__SMN; + case RAS_BLOCK_ID__SEM: + return RAS_TA_BLOCK__SEM; + case RAS_BLOCK_ID__MP0: + return RAS_TA_BLOCK__MP0; + case RAS_BLOCK_ID__MP1: + return RAS_TA_BLOCK__MP1; + case RAS_BLOCK_ID__FUSE: + return RAS_TA_BLOCK__FUSE; + case RAS_BLOCK_ID__MCA: + return RAS_TA_BLOCK__MCA; + case RAS_BLOCK_ID__VCN: + return RAS_TA_BLOCK__VCN; + case RAS_BLOCK_ID__JPEG: + return RAS_TA_BLOCK__JPEG; + default: + return RAS_TA_BLOCK__UMC; + } +} + +static enum ras_ta_error_type __get_ras_ta_err_type(enum ras_ecc_err_type error) +{ + switch (error) { + case RAS_ECC_ERR__NONE: + return RAS_TA_ERROR__NONE; + case RAS_ECC_ERR__PARITY: + return RAS_TA_ERROR__PARITY; + case RAS_ECC_ERR__SINGLE_CORRECTABLE: + return RAS_TA_ERROR__SINGLE_CORRECTABLE; + case RAS_ECC_ERR__MULTI_UNCORRECTABLE: + return RAS_TA_ERROR__MULTI_UNCORRECTABLE; + case RAS_ECC_ERR__POISON: + return RAS_TA_ERROR__POISON; + default: + return RAS_TA_ERROR__NONE; + } +} + +static int ras_cmd_inject_error(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_inject_error_req *req = + (struct ras_cmd_inject_error_req *)cmd->input_buff_raw; + struct ras_cmd_inject_error_rsp *output_data = + (struct ras_cmd_inject_error_rsp *)cmd->output_buff_raw; + int ret = 0; + struct ras_ta_trigger_error_input block_info = { + .block_id = __get_ras_ta_block(req->block_id), + .sub_block_index = req->subblock_id, + .inject_error_type = __get_ras_ta_err_type(req->error_type), + .address = req->address, + .value = req->method, + }; + + ret = ras_psp_trigger_error(ras_core, &block_info, req->instance_mask); + if (!ret) { + output_data->version = 0; + output_data->address = block_info.address; + cmd->output_size = sizeof(struct ras_cmd_inject_error_rsp); + } else { + RAS_DEV_ERR(ras_core->dev, "ras inject block %u failed %d\n", req->block_id, ret); + ret = RAS_CMD__ERROR_ACCESS_DENIED; + } + + return ret; +} + +static struct ras_cmd_func_map ras_cmd_maps[] = { + {RAS_CMD__INJECT_ERROR, ras_cmd_inject_error}, + {RAS_CMD__GET_BLOCK_ECC_STATUS, ras_get_block_ecc_info}, + {RAS_CMD__GET_BAD_PAGES, ras_cmd_get_bad_pages}, + {RAS_CMD__CLEAR_BAD_PAGE_INFO, ras_cmd_clear_bad_page_info}, + {RAS_CMD__RESET_ALL_ERROR_COUNTS, ras_cmd_reset_all_error_counts}, + {RAS_CMD__GET_CPER_SNAPSHOT, ras_cmd_get_cper_snapshot}, + {RAS_CMD__GET_CPER_RECORD, ras_cmd_get_cper_records}, + {RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, ras_cmd_get_batch_trace_snapshot}, + {RAS_CMD__GET_BATCH_TRACE_RECORD, 
ras_cmd_get_batch_trace_records}, +}; + +int rascore_handle_cmd(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_func_map *ras_cmd = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(ras_cmd_maps); i++) { + if (cmd->cmd_id == ras_cmd_maps[i].cmd_id) { + ras_cmd = &ras_cmd_maps[i]; + break; + } + } + + if (!ras_cmd) + return RAS_CMD__ERROR_UKNOWN_CMD; + + return ras_cmd->func(ras_core, cmd, data); +} + +int ras_cmd_init(struct ras_core_context *ras_core) +{ + return ras_cmd_add_device(ras_core); +} + +int ras_cmd_fini(struct ras_core_context *ras_core) +{ + ras_cmd_remove_device(ras_core); + return 0; +} + +int ras_cmd_query_interface_info(struct ras_core_context *ras_core, + struct ras_query_interface_info_rsp *rsp) +{ + rsp->ras_cmd_major_ver = RAS_CMD_MAJOR_VERSION; + rsp->ras_cmd_minor_ver = RAS_CMD_MINOR_VERSION; + + return 0; +} + +int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core, + uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr) +{ + struct umc_bank_addr umc_bank = {0}; + int ret; + + ret = ras_umc_translate_soc_pa_and_bank(ras_core, &soc_pa, &umc_bank, false); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + bank_addr->stack_id = umc_bank.stack_id; + bank_addr->bank_group = umc_bank.bank_group; + bank_addr->bank = umc_bank.bank; + bank_addr->row = umc_bank.row; + bank_addr->column = umc_bank.column; + bank_addr->channel = umc_bank.channel; + bank_addr->subchannel = umc_bank.subchannel; + + return 0; +} + +int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core, + struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa) +{ + struct umc_bank_addr umc_bank = {0}; + + umc_bank.stack_id = bank_addr.stack_id; + umc_bank.bank_group = bank_addr.bank_group; + umc_bank.bank = bank_addr.bank; + umc_bank.row = bank_addr.row; + umc_bank.column = bank_addr.column; + umc_bank.channel = bank_addr.channel; + umc_bank.subchannel = bank_addr.subchannel; + + return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, &umc_bank, true); +} + +uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core) +{ + return ras_core->ras_cmd.dev_handle; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h new file mode 100644 index 000000000000..48a0715eb821 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h @@ -0,0 +1,426 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_CMD_H__ +#define __RAS_CMD_H__ +#include "ras.h" +#include "ras_eeprom.h" +#include "ras_log_ring.h" +#include "ras_cper.h" + +#define RAS_CMD_DEV_HANDLE_MAGIC 0xFEEDAD00UL + +#define RAS_CMD_MAX_IN_SIZE 256 +#define RAS_CMD_MAX_GPU_NUM 32 +#define RAS_CMD_MAX_BAD_PAGES_PER_GROUP 32 + +/* position of instance value in sub_block_index of + * ta_ras_trigger_error_input, the sub block uses lower 12 bits + */ +#define RAS_TA_INST_MASK 0xfffff000 +#define RAS_TA_INST_SHIFT 0xc + +enum ras_cmd_interface_type { + RAS_CMD_INTERFACE_TYPE_NONE, + RAS_CMD_INTERFACE_TYPE_AMDGPU, + RAS_CMD_INTERFACE_TYPE_VF, + RAS_CMD_INTERFACE_TYPE_PF, +}; + +enum ras_cmd_id_range { + RAS_CMD_ID_COMMON_START = 0, + RAS_CMD_ID_COMMON_END = 0x10000, + RAS_CMD_ID_AMDGPU_START = RAS_CMD_ID_COMMON_END, + RAS_CMD_ID_AMDGPU_END = 0x20000, + RAS_CMD_ID_MXGPU_START = RAS_CMD_ID_AMDGPU_END, + RAS_CMD_ID_MXGPU_END = 0x30000, + RAS_CMD_ID_MXGPU_VF_START = RAS_CMD_ID_MXGPU_END, + RAS_CMD_ID_MXGPU_VF_END = 0x40000, +}; + +enum ras_cmd_id { + RAS_CMD__BEGIN = RAS_CMD_ID_COMMON_START, + RAS_CMD__QUERY_INTERFACE_INFO, + RAS_CMD__GET_DEVICES_INFO, + RAS_CMD__GET_BLOCK_ECC_STATUS, + RAS_CMD__INJECT_ERROR, + RAS_CMD__GET_BAD_PAGES, + RAS_CMD__CLEAR_BAD_PAGE_INFO, + RAS_CMD__RESET_ALL_ERROR_COUNTS, + RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, + RAS_CMD__TRANSLATE_FB_ADDRESS, + RAS_CMD__GET_LINK_TOPOLOGY, + RAS_CMD__GET_CPER_SNAPSHOT, + RAS_CMD__GET_CPER_RECORD, + RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, + RAS_CMD__GET_BATCH_TRACE_RECORD, + RAS_CMD__SUPPORTED_MAX = RAS_CMD_ID_COMMON_END, +}; + +enum ras_cmd_response { + RAS_CMD__SUCCESS = 0, + RAS_CMD__SUCCESS_EXEED_BUFFER, + RAS_CMD__ERROR_UKNOWN_CMD, + RAS_CMD__ERROR_INVALID_CMD, + RAS_CMD__ERROR_VERSION, + RAS_CMD__ERROR_INVALID_INPUT_SIZE, + RAS_CMD__ERROR_INVALID_INPUT_DATA, + RAS_CMD__ERROR_DRV_INIT_FAIL, + RAS_CMD__ERROR_ACCESS_DENIED, + RAS_CMD__ERROR_GENERIC, + RAS_CMD__ERROR_TIMEOUT, +}; + +enum ras_error_type { + RAS_TYPE_ERROR__NONE = 0, + RAS_TYPE_ERROR__PARITY = 1, + RAS_TYPE_ERROR__SINGLE_CORRECTABLE = 2, + RAS_TYPE_ERROR__MULTI_UNCORRECTABLE = 4, + RAS_TYPE_ERROR__POISON = 8, +}; + +struct ras_core_context; +struct ras_cmd_ctx; + +struct ras_cmd_mgr { + struct list_head head; + struct ras_core_context *ras_core; + uint64_t dev_handle; +}; + +struct ras_cmd_func_map { + uint32_t cmd_id; + int (*func)(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data); +}; + +struct ras_device_bdf { + union { + struct { + uint32_t function : 3; + uint32_t device : 5; + uint32_t bus : 8; + uint32_t domain : 16; + }; + uint32_t u32_all; + }; +}; + +struct ras_cmd_param { + uint32_t idx_vf; + void *data; +}; + +#pragma pack(push, 8) +struct ras_cmd_ctx { + uint32_t magic; + union { + struct { + uint16_t ras_cmd_minor_ver : 10; + uint16_t ras_cmd_major_ver : 6; + }; + uint16_t ras_cmd_ver; + }; + union { + struct { + uint16_t plat_major_ver : 10; + uint16_t plat_minor_ver : 6; + }; + uint16_t plat_ver; + }; + uint32_t cmd_id; + uint32_t cmd_res; + uint32_t input_size; + uint32_t output_size; + uint32_t output_buf_size; + uint32_t reserved[5]; + uint8_t input_buff_raw[RAS_CMD_MAX_IN_SIZE]; + uint8_t output_buff_raw[]; +}; + +struct ras_cmd_dev_handle { + uint64_t dev_handle; +}; + 
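/*
 * Editor's note -- illustrative sketch only, not part of the patch above.
 * It shows how driver code might wrap a RAS_CMD__GET_BLOCK_ECC_STATUS
 * request in the ras_cmd_ctx envelope defined in this header and hand it
 * to rascore_handle_cmd() (see ras_cmd.c earlier in this diff): handlers
 * check cmd->input_size against the expected request struct, read the
 * request from input_buff_raw, and write the response into the flexible
 * output_buff_raw array.  An initialized ras_core_context, <linux/slab.h>,
 * and the request/response structs defined further down in this header are
 * assumed; "query_umc_ecc_counts" is a hypothetical helper name, not an
 * API introduced by this series.
 */
static int query_umc_ecc_counts(struct ras_core_context *ras_core)
{
	struct ras_cmd_block_ecc_info_req *req;
	struct ras_cmd_block_ecc_info_rsp *rsp;
	struct ras_cmd_ctx *cmd;
	size_t out_size = sizeof(*rsp);
	int ret;

	/* output_buff_raw is a flexible array member, so allocate the
	 * command envelope and the response buffer in one block. */
	cmd = kzalloc(sizeof(*cmd) + out_size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_id = RAS_CMD__GET_BLOCK_ECC_STATUS;
	cmd->input_size = sizeof(*req);
	cmd->output_buf_size = out_size;

	/* The request travels in input_buff_raw. */
	req = (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw;
	req->dev.dev_handle = ras_cmd_get_dev_handle(ras_core);
	req->block_id = RAS_BLOCK_ID__UMC;

	/* Dispatch; the return value is a ras_cmd_response code, not errno. */
	ret = rascore_handle_cmd(ras_core, cmd, NULL);
	if (ret == RAS_CMD__SUCCESS) {
		rsp = (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw;
		RAS_DEV_INFO(ras_core->dev, "UMC ce=%u ue=%u de=%u\n",
			     rsp->ce_count, rsp->ue_count, rsp->de_count);
	}

	kfree(cmd);
	return ret;
}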
+struct ras_cmd_block_ecc_info_req { + struct ras_cmd_dev_handle dev; + uint32_t block_id; + uint32_t subblock_id; + uint32_t reserved[4]; +}; + +struct ras_cmd_block_ecc_info_rsp { + uint32_t version; + uint32_t ce_count; + uint32_t ue_count; + uint32_t de_count; + uint32_t reserved[6]; +}; + +struct ras_cmd_inject_error_req { + struct ras_cmd_dev_handle dev; + uint32_t block_id; + uint32_t subblock_id; + uint64_t address; + uint32_t error_type; + uint32_t instance_mask; + union { + struct { + /* vf index */ + uint64_t vf_idx : 6; + /* method of error injection. i.e persistent, coherent etc */ + uint64_t method : 10; + uint64_t rsv : 48; + }; + uint64_t value; + }; + uint32_t reserved[8]; +}; + +struct ras_cmd_inject_error_rsp { + uint32_t version; + uint32_t reserved[5]; + uint64_t address; +}; + +struct ras_cmd_dev_info { + uint64_t dev_handle; + uint32_t location_id; + uint32_t ecc_enabled; + uint32_t ecc_supported; + uint32_t vf_num; + uint32_t asic_type; + uint32_t oam_id; + uint32_t reserved[8]; +}; + +struct ras_cmd_devices_info_rsp { + uint32_t version; + uint32_t dev_num; + uint32_t reserved[6]; + struct ras_cmd_dev_info devs[RAS_CMD_MAX_GPU_NUM]; +}; + +struct ras_cmd_bad_page_record { + union { + uint64_t address; + uint64_t offset; + }; + uint64_t retired_page; + uint64_t ts; + + uint32_t err_type; + + union { + unsigned char bank; + unsigned char cu; + }; + + unsigned char mem_channel; + unsigned char mcumc_id; + + unsigned char valid; + unsigned char reserved[8]; +}; + +struct ras_cmd_bad_pages_info_req { + struct ras_cmd_dev_handle device; + uint32_t group_index; + uint32_t reserved[5]; +}; + +struct ras_cmd_bad_pages_info_rsp { + uint32_t version; + uint32_t group_index; + uint32_t bp_in_group; + uint32_t bp_total_cnt; + uint32_t reserved[4]; + struct ras_cmd_bad_page_record records[RAS_CMD_MAX_BAD_PAGES_PER_GROUP]; +}; + +struct ras_query_interface_info_req { + uint32_t reserved[8]; +}; + +struct ras_query_interface_info_rsp { + uint32_t version; + uint32_t ras_cmd_major_ver; + uint32_t ras_cmd_minor_ver; + uint32_t plat_major_ver; + uint32_t plat_minor_ver; + uint8_t interface_type; + uint8_t rsv[3]; + uint32_t reserved[8]; +}; + +#define RAS_MAX_NUM_SAFE_RANGES 64 +struct ras_cmd_ras_safe_fb_address_ranges_rsp { + uint32_t version; + uint32_t num_ranges; + uint32_t reserved[4]; + struct { + uint64_t start; + uint64_t size; + uint32_t idx; + uint32_t reserved[3]; + } range[RAS_MAX_NUM_SAFE_RANGES]; +}; + +enum ras_fb_addr_type { + RAS_FB_ADDR_SOC_PHY, /* SPA */ + RAS_FB_ADDR_BANK, + RAS_FB_ADDR_VF_PHY, /* GPA */ + RAS_FB_ADDR_UNKNOWN +}; + +struct ras_fb_bank_addr { + uint32_t stack_id; /* SID */ + uint32_t bank_group; + uint32_t bank; + uint32_t row; + uint32_t column; + uint32_t channel; + uint32_t subchannel; /* Also called Pseudochannel (PC) */ + uint32_t reserved[3]; +}; + +struct ras_fb_vf_phy_addr { + uint32_t vf_idx; + uint32_t reserved; + uint64_t addr; +}; + +union ras_translate_fb_address { + struct ras_fb_bank_addr bank_addr; + uint64_t soc_phy_addr; + struct ras_fb_vf_phy_addr vf_phy_addr; +}; + +struct ras_cmd_translate_fb_address_req { + struct ras_cmd_dev_handle dev; + enum ras_fb_addr_type src_addr_type; + enum ras_fb_addr_type dest_addr_type; + union ras_translate_fb_address trans_addr; +}; + +struct ras_cmd_translate_fb_address_rsp { + uint32_t version; + uint32_t reserved[5]; + union ras_translate_fb_address trans_addr; +}; + +struct ras_dev_link_topology_req { + struct ras_cmd_dev_handle src; + struct ras_cmd_dev_handle dst; +}; + +struct 
ras_dev_link_topology_rsp { + uint32_t version; + uint32_t link_status; /* HW status of the link */ + uint32_t link_type; /* type of the link */ + uint32_t num_hops; /* number of hops */ + uint32_t reserved[8]; +}; + +struct ras_cmd_cper_snapshot_req { + struct ras_cmd_dev_handle dev; +}; + +struct ras_cmd_cper_snapshot_rsp { + uint32_t version; + uint32_t reserved[4]; + uint32_t total_cper_num; + uint64_t start_cper_id; + uint64_t latest_cper_id; +}; + +struct ras_cmd_cper_record_req { + struct ras_cmd_dev_handle dev; + uint64_t cper_start_id; + uint32_t cper_num; + uint32_t buf_size; + uint64_t buf_ptr; + uint32_t reserved[4]; +}; + +struct ras_cmd_cper_record_rsp { + uint32_t version; + uint32_t real_data_size; + uint32_t real_cper_num; + uint32_t remain_num; + uint32_t reserved[4]; +}; + +struct ras_cmd_batch_trace_snapshot_req { + struct ras_cmd_dev_handle dev; +}; + +struct ras_cmd_batch_trace_snapshot_rsp { + uint32_t version; + uint32_t reserved[4]; + uint32_t total_batch_num; + uint64_t start_batch_id; + uint64_t latest_batch_id; +}; + +struct ras_cmd_batch_trace_record_req { + struct ras_cmd_dev_handle dev; + uint64_t start_batch_id; + uint32_t batch_num; + uint32_t reserved[5]; +}; + +struct batch_ras_trace_info { + uint64_t batch_id; + uint16_t offset; + uint8_t trace_num; + uint8_t rsv; + uint32_t reserved; +}; + +#define RAS_CMD_MAX_BATCH_NUM 300 +#define RAS_CMD_MAX_TRACE_NUM 300 +struct ras_cmd_batch_trace_record_rsp { + uint32_t version; + uint16_t real_batch_num; + uint16_t remain_num; + uint64_t start_batch_id; + uint32_t reserved[2]; + struct batch_ras_trace_info batchs[RAS_CMD_MAX_BATCH_NUM]; + struct ras_log_info records[RAS_CMD_MAX_TRACE_NUM]; +}; + +#pragma pack(pop) + +int ras_cmd_init(struct ras_core_context *ras_core); +int ras_cmd_fini(struct ras_core_context *ras_core); +int rascore_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data); +uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core); +int ras_cmd_query_interface_info(struct ras_core_context *ras_core, + struct ras_query_interface_info_rsp *rsp); +int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core, + uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr); +int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core, + struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core.c b/drivers/gpu/drm/amd/ras/rascore/ras_core.c new file mode 100644 index 000000000000..01122b55c98a --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_core.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_core_status.h" + +#define RAS_SEQNO_FIFO_SIZE (128 * sizeof(uint64_t)) + +#define IS_LEAP_YEAR(x) ((x % 4 == 0 && x % 100 != 0) || x % 400 == 0) + +static const char * const ras_block_name[] = { + "umc", + "sdma", + "gfx", + "mmhub", + "athub", + "pcie_bif", + "hdp", + "xgmi_wafl", + "df", + "smn", + "sem", + "mp0", + "mp1", + "fuse", + "mca", + "vcn", + "jpeg", + "ih", + "mpio", +}; + +const char *ras_core_get_ras_block_name(enum ras_block_id block_id) +{ + if (block_id >= ARRAY_SIZE(ras_block_name)) + return ""; + + return ras_block_name[block_id]; +} + +int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core, + uint64_t timestamp, struct ras_time *tm) +{ + int days_in_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + uint64_t month = 0, day = 0, hour = 0, minute = 0, second = 0; + uint32_t year = 0; + int seconds_per_day = 24 * 60 * 60; + int seconds_per_hour = 60 * 60; + int seconds_per_minute = 60; + int days, remaining_seconds; + + days = div64_u64_rem(timestamp, seconds_per_day, (uint64_t *)&remaining_seconds); + + /* utc_timestamp follows the Unix epoch */ + year = 1970; + while (days >= 365) { + if (IS_LEAP_YEAR(year)) { + if (days < 366) + break; + days -= 366; + } else { + days -= 365; + } + year++; + } + + days_in_month[1] += IS_LEAP_YEAR(year); + + month = 0; + while (days >= days_in_month[month]) { + days -= days_in_month[month]; + month++; + } + month++; + day = days + 1; + + if (remaining_seconds) { + hour = remaining_seconds / seconds_per_hour; + minute = (remaining_seconds % seconds_per_hour) / seconds_per_minute; + second = remaining_seconds % seconds_per_minute; + } + + tm->tm_year = year; + tm->tm_mon = month; + tm->tm_mday = day; + tm->tm_hour = hour; + tm->tm_min = minute; + tm->tm_sec = second; + + return 0; +} + +bool ras_core_gpu_in_reset(struct ras_core_context *ras_core) +{ + uint32_t status = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->check_gpu_status) + ras_core->sys_fn->check_gpu_status(ras_core, &status); + + return (status & RAS_GPU_STATUS__IN_RESET) ? true : false; +} + +bool ras_core_gpu_is_vf(struct ras_core_context *ras_core) +{ + uint32_t status = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->check_gpu_status) + ras_core->sys_fn->check_gpu_status(ras_core, &status); + + return (status & RAS_GPU_STATUS__IS_VF) ? true : false; +} + +bool ras_core_gpu_is_rma(struct ras_core_context *ras_core) +{ + if (!ras_core) + return false; + + return ras_core->is_rma; +} + +static int ras_core_seqno_fifo_write(struct ras_core_context *ras_core, + enum ras_seqno_fifo fifo_type, uint64_t seqno) +{ + int ret = 0; + struct kfifo *seqno_fifo = NULL; + + if (fifo_type == SEQNO_FIFO_POISON_CREATION) + seqno_fifo = &ras_core->de_seqno_fifo; + else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION) + seqno_fifo = &ras_core->consumption_seqno_fifo; + + if (seqno_fifo) + ret = kfifo_in_spinlocked(seqno_fifo, + &seqno, sizeof(seqno), &ras_core->seqno_lock); + + return ret ? 
0 : -EINVAL; +} + +static int ras_core_seqno_fifo_read(struct ras_core_context *ras_core, + enum ras_seqno_fifo fifo_type, uint64_t *seqno, bool pop) +{ + int ret = 0; + struct kfifo *seqno_fifo = NULL; + + if (fifo_type == SEQNO_FIFO_POISON_CREATION) + seqno_fifo = &ras_core->de_seqno_fifo; + else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION) + seqno_fifo = &ras_core->consumption_seqno_fifo; + + if (seqno_fifo) { + if (pop) + ret = kfifo_out_spinlocked(seqno_fifo, + seqno, sizeof(*seqno), &ras_core->seqno_lock); + else + ret = kfifo_out_peek(seqno_fifo, seqno, sizeof(*seqno)); + } + + return ret ? 0 : -EINVAL; +} + +uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type type) +{ + uint64_t seqno = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->gen_seqno) + ras_core->sys_fn->gen_seqno(ras_core, type, &seqno); + + return seqno; +} + +int ras_core_put_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t seqno) +{ + int ret = 0; + + if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX) + return -EINVAL; + + if (seqno_type == RAS_SEQNO_TYPE_DE) + ret = ras_core_seqno_fifo_write(ras_core, + SEQNO_FIFO_POISON_CREATION, seqno); + else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION) + ret = ras_core_seqno_fifo_write(ras_core, + SEQNO_FIFO_POISON_CONSUMPTION, seqno); + else + ret = -EINVAL; + + return ret; +} + +uint64_t ras_core_get_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, bool pop) +{ + uint64_t seq_no; + int ret = -ENODATA; + + if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX) + return 0; + + if (seqno_type == RAS_SEQNO_TYPE_DE) + ret = ras_core_seqno_fifo_read(ras_core, + SEQNO_FIFO_POISON_CREATION, &seq_no, pop); + else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION) + ret = ras_core_seqno_fifo_read(ras_core, + SEQNO_FIFO_POISON_CONSUMPTION, &seq_no, pop); + + if (ret) + seq_no = ras_core_gen_seqno(ras_core, seqno_type); + + return seq_no; +} + +static int ras_core_eeprom_recovery(struct ras_core_context *ras_core) +{ + int count; + int ret; + + count = ras_eeprom_get_record_count(ras_core); + if (!count) + return 0; + + /* Avoid bad page to be loaded again after gpu reset */ + if (ras_umc_get_saved_eeprom_count(ras_core) >= count) + return 0; + + ret = ras_umc_load_bad_pages(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "ras_umc_load_bad_pages failed: %d\n", ret); + return ret; + } + + ras_eeprom_sync_info(ras_core); + + return ret; +} + +struct ras_core_context *ras_core_create(struct ras_core_config *init_config) +{ + struct ras_core_context *ras_core; + struct ras_core_config *config; + + ras_core = kzalloc(sizeof(*ras_core), GFP_KERNEL); + if (!ras_core) + return NULL; + + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) { + kfree(ras_core); + return NULL; + } + + memcpy(config, init_config, sizeof(*config)); + ras_core->config = config; + + return ras_core; +} + +void ras_core_destroy(struct ras_core_context *ras_core) +{ + if (ras_core) + kfree(ras_core->config); + + kfree(ras_core); +} + +int ras_core_sw_init(struct ras_core_context *ras_core) +{ + int ret; + + if (!ras_core->config) { + RAS_DEV_ERR(ras_core->dev, "No ras core config!\n"); + return -EINVAL; + } + + ras_core->sys_fn = ras_core->config->sys_fn; + if (!ras_core->sys_fn) + return -EINVAL; + + ret = kfifo_alloc(&ras_core->de_seqno_fifo, + RAS_SEQNO_FIFO_SIZE, GFP_KERNEL); + if (ret) + return ret; + + ret = kfifo_alloc(&ras_core->consumption_seqno_fifo, + RAS_SEQNO_FIFO_SIZE, GFP_KERNEL); + if (ret) + return 
ret; + + spin_lock_init(&ras_core->seqno_lock); + + ret = ras_aca_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_umc_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_cmd_init(ras_core); + if (ret) + return ret; + + ret = ras_log_ring_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_psp_sw_init(ras_core); + if (ret) + return ret; + + return 0; +} + +int ras_core_sw_fini(struct ras_core_context *ras_core) +{ + kfifo_free(&ras_core->de_seqno_fifo); + kfifo_free(&ras_core->consumption_seqno_fifo); + + ras_psp_sw_fini(ras_core); + ras_log_ring_sw_fini(ras_core); + ras_cmd_fini(ras_core); + ras_umc_sw_fini(ras_core); + ras_aca_sw_fini(ras_core); + + return 0; +} + +int ras_core_hw_init(struct ras_core_context *ras_core) +{ + int ret; + + ras_core->ras_eeprom_supported = + ras_core->config->ras_eeprom_supported; + + ras_core->poison_supported = ras_core->config->poison_supported; + + ret = ras_psp_hw_init(ras_core); + if (ret) + return ret; + + ret = ras_aca_hw_init(ras_core); + if (ret) + goto init_err1; + + ret = ras_mp1_hw_init(ras_core); + if (ret) + goto init_err2; + + ret = ras_nbio_hw_init(ras_core); + if (ret) + goto init_err3; + + ret = ras_umc_hw_init(ras_core); + if (ret) + goto init_err4; + + ret = ras_gfx_hw_init(ras_core); + if (ret) + goto init_err5; + + ret = ras_eeprom_hw_init(ras_core); + if (ret) + goto init_err6; + + ret = ras_core_eeprom_recovery(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, + "Failed to recovery ras core, ret:%d\n", ret); + goto init_err6; + } + + ret = ras_eeprom_check_storage_status(ras_core); + if (ret) + goto init_err6; + + ret = ras_process_init(ras_core); + if (ret) + goto init_err7; + + ras_core->is_initialized = true; + + return 0; + +init_err7: + ras_eeprom_hw_fini(ras_core); +init_err6: + ras_gfx_hw_fini(ras_core); +init_err5: + ras_umc_hw_fini(ras_core); +init_err4: + ras_nbio_hw_fini(ras_core); +init_err3: + ras_mp1_hw_fini(ras_core); +init_err2: + ras_aca_hw_fini(ras_core); +init_err1: + ras_psp_hw_fini(ras_core); + return ret; +} + +int ras_core_hw_fini(struct ras_core_context *ras_core) +{ + ras_core->is_initialized = false; + + ras_process_fini(ras_core); + ras_eeprom_hw_fini(ras_core); + ras_gfx_hw_fini(ras_core); + ras_nbio_hw_fini(ras_core); + ras_umc_hw_fini(ras_core); + ras_mp1_hw_fini(ras_core); + ras_aca_hw_fini(ras_core); + ras_psp_hw_fini(ras_core); + + return 0; +} + +bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data) +{ + return ras_nbio_handle_irq_error(ras_core, data); +} + +int ras_core_handle_fatal_error(struct ras_core_context *ras_core) +{ + int ret = 0; + + ras_aca_mark_fatal_flag(ras_core); + + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__FATAL_ERROR_DETECTED, NULL); + + return ret; +} + +uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core) +{ + if (ras_core->ras_nbio.ip_func && + ras_core->ras_nbio.ip_func->get_memory_partition_mode) + return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core); + + RAS_DEV_ERR(ras_core->dev, "Failed to get gpu memory nps mode!\n"); + return 0; +} + +int ras_core_update_ecc_info(struct ras_core_context *ras_core) +{ + int ret; + + ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__CE, NULL); + if (!ret) + ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL); + + return ret; +} + +int ras_core_query_block_ecc_data(struct ras_core_context *ras_core, + enum ras_block_id block, struct ras_ecc_count *ecc_count) +{ + int ret; + + if (!ecc_count || (block >= RAS_BLOCK_ID__LAST) || !ras_core) + 
return -EINVAL; + + ret = ras_aca_get_block_ecc_count(ras_core, block, ecc_count); + if (!ret) + ras_aca_clear_block_new_ecc_count(ras_core, block); + + return ret; +} + +int ras_core_set_status(struct ras_core_context *ras_core, bool enable) +{ + ras_core->ras_core_enabled = enable; + + return 0; +} + +bool ras_core_is_enabled(struct ras_core_context *ras_core) +{ + return ras_core->ras_core_enabled; +} + +uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->get_utc_second_timestamp) + return ras_core->sys_fn->get_utc_second_timestamp(ras_core); + + RAS_DEV_ERR(ras_core->dev, "Failed to get system timestamp!\n"); + return 0; +} + +int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa) +{ + if (!ras_core || !soc_pa || !bank_addr) + return -EINVAL; + + return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, bank_addr, bank_to_pa); +} + +bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->detect_ras_interrupt) + return ras_core->sys_fn->detect_ras_interrupt(ras_core); + + RAS_DEV_ERR(ras_core->dev, "Failed to detect ras interrupt!\n"); + return false; +} + +int ras_core_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + if (ras_core->sys_fn && ras_core->sys_fn->get_gpu_mem) + return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem); + + RAS_DEV_ERR(ras_core->dev, "Not config get gpu memory API!\n"); + return -EACCES; +} + +int ras_core_put_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem) + return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem); + + RAS_DEV_ERR(ras_core->dev, "Not config put gpu memory API!!\n"); + return -EACCES; +} + +bool ras_core_is_ready(struct ras_core_context *ras_core) +{ + return ras_core ? 
ras_core->is_initialized : false; +} + +bool ras_core_check_safety_watermark(struct ras_core_context *ras_core) +{ + return ras_eeprom_check_safety_watermark(ras_core); +} + +int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + return ras_core->sys_fn->gpu_reset_lock(ras_core, true, true); + + return 1; +} + +void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + ras_core->sys_fn->gpu_reset_lock(ras_core, true, false); +} + +void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + ras_core->sys_fn->gpu_reset_lock(ras_core, false, false); +} + +int ras_core_event_notify(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->ras_notifier) + return ras_core->sys_fn->ras_notifier(ras_core, event_id, data); + + return -RAS_CORE_NOT_SUPPORTED; +} + +int ras_core_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->get_device_system_info) + return ras_core->sys_fn->get_device_system_info(ras_core, dev_info); + + return -RAS_CORE_NOT_SUPPORTED; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.c b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c new file mode 100644 index 000000000000..0fc7522b7ab6 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_core_status.h" +#include "ras_log_ring.h" +#include "ras_cper.h" + +static const struct ras_cper_guid MCE = CPER_NOTIFY__MCE; +static const struct ras_cper_guid CMC = CPER_NOTIFY__CMC; +static const struct ras_cper_guid BOOT = BOOT__TYPE; + +static const struct ras_cper_guid CRASHDUMP = GPU__CRASHDUMP; +static const struct ras_cper_guid RUNTIME = GPU__NONSTANDARD_ERROR; + +static void cper_get_timestamp(struct ras_core_context *ras_core, + struct ras_cper_timestamp *timestamp, uint64_t utc_second_timestamp) +{ + struct ras_time tm = {0}; + + ras_core_convert_timestamp_to_time(ras_core, utc_second_timestamp, &tm); + timestamp->seconds = tm.tm_sec; + timestamp->minutes = tm.tm_min; + timestamp->hours = tm.tm_hour; + timestamp->flag = 0; + timestamp->day = tm.tm_mday; + timestamp->month = tm.tm_mon; + timestamp->year = tm.tm_year % 100; + timestamp->century = tm.tm_year / 100; +} + +static void fill_section_hdr(struct ras_core_context *ras_core, + struct cper_section_hdr *hdr, enum ras_cper_type type, + enum ras_cper_severity sev, struct ras_log_info *trace) +{ + struct device_system_info dev_info = {0}; + char record_id[32]; + + hdr->signature[0] = 'C'; + hdr->signature[1] = 'P'; + hdr->signature[2] = 'E'; + hdr->signature[3] = 'R'; + hdr->revision = CPER_HDR__REV_1; + hdr->signature_end = 0xFFFFFFFF; + hdr->error_severity = (sev == RAS_CPER_SEV_RMA ? RAS_CPER_SEV_FATAL_UE : sev); + + hdr->valid_bits.platform_id = 1; + hdr->valid_bits.timestamp = 1; + + ras_core_get_device_system_info(ras_core, &dev_info); + + cper_get_timestamp(ras_core, &hdr->timestamp, trace->timestamp); + + snprintf(record_id, sizeof(record_id), "%d:%llX", dev_info.socket_id, + RAS_LOG_SEQNO_TO_BATCH_IDX(trace->seqno)); + memcpy(hdr->record_id, record_id, 8); + + snprintf(hdr->platform_id, 16, "0x%04X:0x%04X", + dev_info.vendor_id, dev_info.device_id); + /* pmfw version should be part of creator_id according to CPER spec */ + snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID__AMDGPU); + + switch (type) { + case RAS_CPER_TYPE_BOOT: + hdr->notify_type = BOOT; + break; + case RAS_CPER_TYPE_FATAL: + case RAS_CPER_TYPE_RMA: + hdr->notify_type = MCE; + break; + case RAS_CPER_TYPE_RUNTIME: + if (sev == RAS_CPER_SEV_NON_FATAL_CE) + hdr->notify_type = CMC; + else + hdr->notify_type = MCE; + break; + default: + RAS_DEV_ERR(ras_core->dev, "Unknown CPER Type\n"); + break; + } +} + +static int fill_section_descriptor(struct ras_core_context *ras_core, + struct cper_section_descriptor *descriptor, + enum ras_cper_severity sev, + struct ras_cper_guid sec_type, + uint32_t section_offset, + uint32_t section_length) +{ + struct device_system_info dev_info = {0}; + + descriptor->revision_minor = CPER_SEC__MINOR_REV_1; + descriptor->revision_major = CPER_SEC__MAJOR_REV_22; + descriptor->sec_offset = section_offset; + descriptor->sec_length = section_length; + descriptor->valid_bits.fru_text = 1; + descriptor->flag_bits.primary = 1; + descriptor->severity = (sev == RAS_CPER_SEV_RMA ? 
RAS_CPER_SEV_FATAL_UE : sev); + descriptor->sec_type = sec_type; + + ras_core_get_device_system_info(ras_core, &dev_info); + + snprintf(descriptor->fru_text, 20, "OAM%d", dev_info.socket_id); + + if (sev == RAS_CPER_SEV_RMA) + descriptor->flag_bits.exceed_err_threshold = 1; + + if (sev == RAS_CPER_SEV_NON_FATAL_UE) + descriptor->flag_bits.latent_err = 1; + + return 0; +} + +static int fill_section_fatal(struct ras_core_context *ras_core, + struct cper_section_fatal *fatal, struct ras_log_info *trace) +{ + fatal->data.reg_ctx_type = CPER_CTX_TYPE__CRASH; + fatal->data.reg_arr_size = sizeof(fatal->data.reg); + + fatal->data.reg.status = trace->aca_reg.regs[RAS_CPER_ACA_REG_STATUS]; + fatal->data.reg.addr = trace->aca_reg.regs[RAS_CPER_ACA_REG_ADDR]; + fatal->data.reg.ipid = trace->aca_reg.regs[RAS_CPER_ACA_REG_IPID]; + fatal->data.reg.synd = trace->aca_reg.regs[RAS_CPER_ACA_REG_SYND]; + + return 0; +} + +static int fill_section_runtime(struct ras_core_context *ras_core, + struct cper_section_runtime *runtime, struct ras_log_info *trace, + enum ras_cper_severity sev) +{ + runtime->hdr.valid_bits.err_info_cnt = 1; + runtime->hdr.valid_bits.err_context_cnt = 1; + + runtime->descriptor.error_type = RUNTIME; + runtime->descriptor.ms_chk_bits.err_type_valid = 1; + if (sev == RAS_CPER_SEV_RMA) { + runtime->descriptor.valid_bits.ms_chk = 1; + runtime->descriptor.ms_chk_bits.err_type = 1; + runtime->descriptor.ms_chk_bits.pcc = 1; + } + + runtime->reg.reg_ctx_type = CPER_CTX_TYPE__CRASH; + runtime->reg.reg_arr_size = sizeof(runtime->reg.reg_dump); + + runtime->reg.reg_dump[RAS_CPER_ACA_REG_CTL] = trace->aca_reg.regs[ACA_REG_IDX__CTL]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_STATUS] = trace->aca_reg.regs[ACA_REG_IDX__STATUS]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_ADDR] = trace->aca_reg.regs[ACA_REG_IDX__ADDR]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_MISC0] = trace->aca_reg.regs[ACA_REG_IDX__MISC0]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_CONFIG] = trace->aca_reg.regs[ACA_REG_IDX__CONFG]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_IPID] = trace->aca_reg.regs[ACA_REG_IDX__IPID]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_SYND] = trace->aca_reg.regs[ACA_REG_IDX__SYND]; + + return 0; +} + +static int cper_generate_runtime_record(struct ras_core_context *ras_core, + struct cper_section_hdr *hdr, struct ras_log_info **trace_arr, uint32_t arr_num, + enum ras_cper_severity sev) +{ + struct cper_section_descriptor *descriptor; + struct cper_section_runtime *runtime; + int i; + + fill_section_hdr(ras_core, hdr, RAS_CPER_TYPE_RUNTIME, sev, trace_arr[0]); + hdr->record_length = RAS_HDR_LEN + ((RAS_SEC_DESC_LEN + RAS_NONSTD_SEC_LEN) * arr_num); + hdr->sec_cnt = arr_num; + for (i = 0; i < arr_num; i++) { + descriptor = (struct cper_section_descriptor *)((uint8_t *)hdr + + RAS_SEC_DESC_OFFSET(i)); + runtime = (struct cper_section_runtime *)((uint8_t *)hdr + + RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i)); + + fill_section_descriptor(ras_core, descriptor, sev, RUNTIME, + RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i), + sizeof(struct cper_section_runtime)); + fill_section_runtime(ras_core, runtime, trace_arr[i], sev); + } + + return 0; +} + +static int cper_generate_fatal_record(struct ras_core_context *ras_core, + uint8_t *buffer, struct ras_log_info **trace_arr, uint32_t arr_num) +{ + struct ras_cper_fatal_record record = {0}; + int i = 0; + + for (i = 0; i < arr_num; i++) { + fill_section_hdr(ras_core, &record.hdr, RAS_CPER_TYPE_FATAL, + RAS_CPER_SEV_FATAL_UE, trace_arr[i]); + record.hdr.record_length = RAS_HDR_LEN + 
RAS_SEC_DESC_LEN + RAS_FATAL_SEC_LEN; + record.hdr.sec_cnt = 1; + + fill_section_descriptor(ras_core, &record.descriptor, RAS_CPER_SEV_FATAL_UE, + CRASHDUMP, offsetof(struct ras_cper_fatal_record, fatal), + sizeof(struct cper_section_fatal)); + + fill_section_fatal(ras_core, &record.fatal, trace_arr[i]); + + memcpy(buffer + (i * record.hdr.record_length), + &record, record.hdr.record_length); + } + + return 0; +} + +static int cper_get_record_size(enum ras_cper_type type, uint16_t section_count) +{ + int size = 0; + + size += RAS_HDR_LEN; + size += (RAS_SEC_DESC_LEN * section_count); + + switch (type) { + case RAS_CPER_TYPE_RUNTIME: + case RAS_CPER_TYPE_RMA: + size += (RAS_NONSTD_SEC_LEN * section_count); + break; + case RAS_CPER_TYPE_FATAL: + size += (RAS_FATAL_SEC_LEN * section_count); + size += (RAS_HDR_LEN * (section_count - 1)); + break; + case RAS_CPER_TYPE_BOOT: + size += (RAS_BOOT_SEC_LEN * section_count); + break; + default: + /* should never reach here */ + break; + } + + return size; +} + +static enum ras_cper_type cper_ras_log_event_to_cper_type(enum ras_log_event event) +{ + switch (event) { + case RAS_LOG_EVENT_UE: + return RAS_CPER_TYPE_FATAL; + case RAS_LOG_EVENT_DE: + case RAS_LOG_EVENT_CE: + case RAS_LOG_EVENT_POISON_CREATION: + case RAS_LOG_EVENT_POISON_CONSUMPTION: + return RAS_CPER_TYPE_RUNTIME; + case RAS_LOG_EVENT_RMA: + return RAS_CPER_TYPE_RMA; + default: + /* should never reach here */ + return RAS_CPER_TYPE_RUNTIME; + } +} + +int ras_cper_generate_cper(struct ras_core_context *ras_core, + struct ras_log_info **trace_list, uint32_t count, + uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len) +{ + uint8_t *buffer = buf; + uint64_t buf_size = buf_len; + int record_size, saved_size = 0; + struct cper_section_hdr *hdr; + + /* All the batch traces share the same event */ + record_size = cper_get_record_size( + cper_ras_log_event_to_cper_type(trace_list[0]->event), count); + + if ((record_size + saved_size) > buf_size) + return -ENOMEM; + + hdr = (struct cper_section_hdr *)(buffer + saved_size); + + switch (trace_list[0]->event) { + case RAS_LOG_EVENT_RMA: + cper_generate_runtime_record(ras_core, hdr, trace_list, count, RAS_CPER_SEV_RMA); + break; + case RAS_LOG_EVENT_DE: + cper_generate_runtime_record(ras_core, + hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_UE); + break; + case RAS_LOG_EVENT_CE: + cper_generate_runtime_record(ras_core, + hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_CE); + break; + case RAS_LOG_EVENT_UE: + cper_generate_fatal_record(ras_core, buffer + saved_size, trace_list, count); + break; + default: + RAS_DEV_WARN(ras_core->dev, "Unprocessed trace event: %d\n", trace_list[0]->event); + break; + } + + saved_size += record_size; + + *real_data_len = saved_size; + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.h b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h new file mode 100644 index 000000000000..076c1883c1ce --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_CPER_H__ +#define __RAS_CPER_H__ + +#define CPER_UUID_MAX_SIZE 16 +struct ras_cper_guid { + uint8_t b[CPER_UUID_MAX_SIZE]; +}; + +#define CPER_GUID__INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ + ((struct ras_cper_guid) \ + {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ + (b) & 0xff, ((b) >> 8) & 0xff, \ + (c) & 0xff, ((c) >> 8) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +#define CPER_HDR__REV_1 (0x100) +#define CPER_SEC__MINOR_REV_1 (0x01) +#define CPER_SEC__MAJOR_REV_22 (0x22) +#define CPER_OAM_MAX_COUNT (8) + +#define CPER_CTX_TYPE__CRASH (1) +#define CPER_CTX_TYPE__BOOT (9) + +#define CPER_CREATOR_ID__AMDGPU "amdgpu" + +#define CPER_NOTIFY__MCE \ + CPER_GUID__INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ + 0xE1, 0x49, 0x13, 0xBB) +#define CPER_NOTIFY__CMC \ + CPER_GUID__INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ + 0xEB, 0xD4, 0xF8, 0x90) +#define BOOT__TYPE \ + CPER_GUID__INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ + 0xD4, 0x64, 0xB3, 0x8F) + +#define GPU__CRASHDUMP \ + CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \ + 0x72, 0x5F, 0xD6, 0xAE) +#define GPU__NONSTANDARD_ERROR \ + CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \ + 0x17, 0x80, 0x55, 0x1D) +#define PROC_ERR__SECTION_TYPE \ + CPER_GUID__INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ + 0x24, 0x2B, 0x6E, 0x1D) + +enum ras_cper_type { + RAS_CPER_TYPE_RUNTIME, + RAS_CPER_TYPE_FATAL, + RAS_CPER_TYPE_BOOT, + RAS_CPER_TYPE_RMA, +}; + +enum ras_cper_severity { + RAS_CPER_SEV_NON_FATAL_UE = 0, + RAS_CPER_SEV_FATAL_UE = 1, + RAS_CPER_SEV_NON_FATAL_CE = 2, + RAS_CPER_SEV_RMA = 3, + + RAS_CPER_SEV_UNUSED = 10, +}; + +enum ras_cper_aca_reg { + RAS_CPER_ACA_REG_CTL = 0, + RAS_CPER_ACA_REG_STATUS = 1, + RAS_CPER_ACA_REG_ADDR = 2, + RAS_CPER_ACA_REG_MISC0 = 3, + RAS_CPER_ACA_REG_CONFIG = 4, + RAS_CPER_ACA_REG_IPID = 5, + RAS_CPER_ACA_REG_SYND = 6, + RAS_CPER_ACA_REG_DESTAT = 8, + RAS_CPER_ACA_REG_DEADDR = 9, + RAS_CPER_ACA_REG_MASK = 10, + + RAS_CPER_ACA_REG_COUNT = 16, +}; + +#pragma pack(push, 1) + +struct ras_cper_timestamp { + uint8_t seconds; + uint8_t minutes; + uint8_t hours; + uint8_t flag; + uint8_t day; + uint8_t month; + uint8_t year; + uint8_t century; +}; + +struct cper_section_hdr { + char signature[4]; /* "CPER" */ + uint16_t revision; + uint32_t signature_end; /* 0xFFFFFFFF 
*/ + uint16_t sec_cnt; + enum ras_cper_severity error_severity; + union { + struct { + uint32_t platform_id : 1; + uint32_t timestamp : 1; + uint32_t partition_id : 1; + uint32_t reserved : 29; + } valid_bits; + uint32_t valid_mask; + }; + uint32_t record_length; /* Total size of CPER Entry */ + struct ras_cper_timestamp timestamp; + char platform_id[16]; + struct ras_cper_guid partition_id; /* Reserved */ + char creator_id[16]; + struct ras_cper_guid notify_type; /* CMC, MCE */ + char record_id[8]; /* Unique CPER Entry ID */ + uint32_t flags; /* Reserved */ + uint64_t persistence_info; /* Reserved */ + uint8_t reserved[12]; /* Reserved */ +}; + +struct cper_section_descriptor { + uint32_t sec_offset; /* Offset from the start of CPER entry */ + uint32_t sec_length; + uint8_t revision_minor; /* CPER_SEC_MINOR_REV_1 */ + uint8_t revision_major; /* CPER_SEC_MAJOR_REV_22 */ + union { + struct { + uint8_t fru_id : 1; + uint8_t fru_text : 1; + uint8_t reserved : 6; + } valid_bits; + uint8_t valid_mask; + }; + uint8_t reserved; + union { + struct { + uint32_t primary : 1; + uint32_t reserved1 : 2; + uint32_t exceed_err_threshold : 1; + uint32_t latent_err : 1; + uint32_t reserved2 : 27; + } flag_bits; + uint32_t flag_mask; + }; + struct ras_cper_guid sec_type; + char fru_id[16]; + enum ras_cper_severity severity; + char fru_text[20]; +}; + +struct runtime_hdr { + union { + struct { + uint64_t apic_id : 1; + uint64_t fw_id : 1; + uint64_t err_info_cnt : 6; + uint64_t err_context_cnt : 6; + } valid_bits; + uint64_t valid_mask; + }; + uint64_t apic_id; + char fw_id[48]; +}; + +struct runtime_descriptor { + struct ras_cper_guid error_type; + union { + struct { + uint64_t ms_chk : 1; + uint64_t target_addr_id : 1; + uint64_t req_id : 1; + uint64_t resp_id : 1; + uint64_t instr_ptr : 1; + uint64_t reserved : 59; + } valid_bits; + uint64_t valid_mask; + }; + union { + struct { + uint64_t err_type_valid : 1; + uint64_t pcc_valid : 1; + uint64_t uncorr_valid : 1; + uint64_t precise_ip_valid : 1; + uint64_t restartable_ip_valid : 1; + uint64_t overflow_valid : 1; + uint64_t reserved1 : 10; + uint64_t err_type : 2; + uint64_t pcc : 1; + uint64_t uncorr : 1; + uint64_t precised_ip : 1; + uint64_t restartable_ip : 1; + uint64_t overflow : 1; + uint64_t reserved2 : 41; + } ms_chk_bits; + uint64_t ms_chk_mask; + }; + uint64_t target_addr_id; + uint64_t req_id; + uint64_t resp_id; + uint64_t instr_ptr; +}; + +struct runtime_error_reg { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + uint32_t msr_addr; + uint64_t mm_reg_addr; + uint64_t reg_dump[RAS_CPER_ACA_REG_COUNT]; +}; + +struct cper_section_runtime { + struct runtime_hdr hdr; + struct runtime_descriptor descriptor; + struct runtime_error_reg reg; +}; + +struct crashdump_hdr { + uint64_t reserved1; + uint64_t reserved2; + char fw_id[48]; + uint64_t reserved3[8]; +}; + +struct fatal_reg_info { + uint64_t status; + uint64_t addr; + uint64_t ipid; + uint64_t synd; +}; + +struct crashdump_fatal { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + uint32_t reserved1; + uint64_t reserved2; + struct fatal_reg_info reg; +}; + +struct crashdump_boot { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + uint32_t reserved1; + uint64_t reserved2; + uint64_t msg[CPER_OAM_MAX_COUNT]; +}; + +struct cper_section_fatal { + struct crashdump_hdr hdr; + struct crashdump_fatal data; +}; + +struct cper_section_boot { + struct crashdump_hdr hdr; + struct crashdump_boot data; +}; + +struct ras_cper_fatal_record { + struct cper_section_hdr hdr; + struct cper_section_descriptor 
descriptor; + struct cper_section_fatal fatal; +}; +#pragma pack(pop) + +#define RAS_HDR_LEN (sizeof(struct cper_section_hdr)) +#define RAS_SEC_DESC_LEN (sizeof(struct cper_sec_desc)) + +#define RAS_BOOT_SEC_LEN (sizeof(struct cper_sec_crashdump_boot)) +#define RAS_FATAL_SEC_LEN (sizeof(struct cper_sec_crashdump_fatal)) +#define RAS_NONSTD_SEC_LEN (sizeof(struct cper_sec_nonstd_err)) + +#define RAS_SEC_DESC_OFFSET(idx) (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * idx)) + +#define RAS_BOOT_SEC_OFFSET(count, idx) \ + (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_BOOT_SEC_LEN * idx)) +#define RAS_FATAL_SEC_OFFSET(count, idx) \ + (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_FATAL_SEC_LEN * idx)) +#define RAS_NONSTD_SEC_OFFSET(count, idx) \ + (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_NONSTD_SEC_LEN * idx)) + +struct ras_core_context; +struct ras_log_info; +int ras_cper_generate_cper(struct ras_core_context *ras_core, + struct ras_log_info **trace_list, uint32_t count, + uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c new file mode 100644 index 000000000000..cd6b057bdaf3 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c @@ -0,0 +1,1339 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras_eeprom.h" +#include "ras.h" + +/* These are memory addresses as would be seen by one or more EEPROM + * chips strung on the I2C bus, usually by manipulating pins 1-3 of a + * set of EEPROM devices. They form a continuous memory space. + * + * The I2C device address includes the device type identifier, 1010b, + * which is a reserved value and indicates that this is an I2C EEPROM + * device. It also includes the top 3 bits of the 19 bit EEPROM memory + * address, namely bits 18, 17, and 16. This makes up the 7 bit + * address sent on the I2C bus with bit 0 being the direction bit, + * which is not represented here, and sent by the hardware directly. + * + * For instance, + * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0. + * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h. + * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h. 
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may + * address memory in a device or a device on the I2C bus, depending on + * the status of pins 1-3. + * + * The RAS table lives either at address 0 or address 40000h of EEPROM. + */ +#define EEPROM_I2C_MADDR_0 0x0 +#define EEPROM_I2C_MADDR_4 0x40000 + +#define EEPROM_PAGE_BITS 8 +#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS) +#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1) + +#define EEPROM_OFFSET_SIZE 2 +#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF)) + +/* + * The 2 macros bellow represent the actual size in bytes that + * those entities occupy in the EEPROM memory. + * RAS_TABLE_RECORD_SIZE is different than sizeof(eeprom_umc_record) which + * uses uint64 to store 6b fields such as retired_page. + */ +#define RAS_TABLE_HEADER_SIZE 20 +#define RAS_TABLE_RECORD_SIZE 24 + +/* Table hdr is 'AMDR' */ +#define RAS_TABLE_HDR_VAL 0x414d4452 + +/* Bad GPU tag ‘BADG’ */ +#define RAS_TABLE_HDR_BAD 0x42414447 + +/* + * EEPROM Table structure v1 + * --------------------------------- + * | | + * | EEPROM TABLE HEADER | + * | ( size 20 Bytes ) | + * | | + * --------------------------------- + * | | + * | BAD PAGE RECORD AREA | + * | | + * --------------------------------- + */ + +/* Assume 2-Mbit size EEPROM and take up the whole space. */ +#define RAS_TBL_SIZE_BYTES (256 * 1024) +#define RAS_TABLE_START 0 +#define RAS_HDR_START RAS_TABLE_START +#define RAS_RECORD_START (RAS_HDR_START + RAS_TABLE_HEADER_SIZE) +#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \ + / RAS_TABLE_RECORD_SIZE) + +/* + * EEPROM Table structrue v2.1 + * --------------------------------- + * | | + * | EEPROM TABLE HEADER | + * | ( size 20 Bytes ) | + * | | + * --------------------------------- + * | | + * | EEPROM TABLE RAS INFO | + * | (available info size 4 Bytes) | + * | ( reserved size 252 Bytes ) | + * | | + * --------------------------------- + * | | + * | BAD PAGE RECORD AREA | + * | | + * --------------------------------- + */ + +/* EEPROM Table V2_1 */ +#define RAS_TABLE_V2_1_INFO_SIZE 256 +#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE +#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \ + RAS_TABLE_V2_1_INFO_SIZE) +#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \ + RAS_TABLE_V2_1_INFO_SIZE) \ + / RAS_TABLE_RECORD_SIZE) + +/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM + * offset off of RAS_TABLE_START. That is, this is something you can + * add to control->i2c_address, and then tell I2C layer to read + * from/write to there. _N is the so called absolute index, + * because it starts right after the table header. + */ +#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \ + (_N) * RAS_TABLE_RECORD_SIZE) + +#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \ + (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE) + +/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off + * of "fri", return the absolute record index off of the end of + * the table header. 
+ */ +#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \ + (_C)->ras_max_record_count) + +#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ + RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE) + +#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ + RAS_TABLE_HEADER_SIZE - \ + RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE) + +#define to_ras_core_context(x) (container_of(x, struct ras_core_context, ras_eeprom)) + +static bool __is_ras_eeprom_supported(struct ras_core_context *ras_core) +{ + return ras_core->ras_eeprom_supported; +} + +static bool __get_eeprom_i2c_addr(struct ras_core_context *ras_core, + struct ras_eeprom_control *control) +{ + int ret = -EINVAL; + + if (control->sys_func && + control->sys_func->update_eeprom_i2c_config) + ret = control->sys_func->update_eeprom_i2c_config(ras_core); + else + RAS_DEV_WARN(ras_core->dev, + "No eeprom i2c system config!\n"); + + return !ret ? true : false; +} + +static int __ras_eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + int ret; + + if (control->sys_func && control->sys_func->eeprom_i2c_xfer) { + ret = control->sys_func->eeprom_i2c_xfer(ras_core, + eeprom_addr, eeprom_buf, buf_size, read); + + if ((ret > 0) && !read) { + /* According to EEPROM specs the length of the + * self-writing cycle, tWR (tW), is 10 ms. + * + * TODO: Use polling on ACK, aka Acknowledge + * Polling, to minimize waiting for the + * internal write cycle to complete, as it is + * usually smaller than tWR (tW). + */ + msleep(10); + } + + return ret; + } + + RAS_DEV_ERR(ras_core->dev, "Error: No eeprom i2c system xfer function!\n"); + return -EINVAL; +} + +static int __eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + u16 limit; + u16 ps; /* Partial size */ + int res = 0, r; + + if (read) + limit = ras_core->ras_eeprom.max_read_len; + else + limit = ras_core->ras_eeprom.max_write_len; + + if (limit && (limit <= EEPROM_OFFSET_SIZE)) { + RAS_DEV_ERR(ras_core->dev, + "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d", + eeprom_addr, buf_size, + read ? "read" : "write", EEPROM_OFFSET_SIZE); + return -EINVAL; + } + + ras_core_down_gpu_reset_lock(ras_core); + + if (limit == 0) { + res = __ras_eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, buf_size, read); + } else { + /* The "limit" includes all data bytes sent/received, + * which would include the EEPROM_OFFSET_SIZE bytes. + * Account for them here. + */ + limit -= EEPROM_OFFSET_SIZE; + for ( ; buf_size > 0; + buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) { + ps = (buf_size < limit) ? 
buf_size : limit; + + r = __ras_eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, ps, read); + if (r < 0) + break; + + res += r; + } + } + + ras_core_up_gpu_reset_lock(ras_core); + + return res; +} + +static int __eeprom_read(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 bytes) +{ + return __eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, bytes, true); +} + +static int __eeprom_write(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 bytes) +{ + return __eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, bytes, false); +} + +static void +__encode_table_header_to_buf(struct ras_eeprom_table_header *hdr, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + + pp[0] = cpu_to_le32(hdr->header); + pp[1] = cpu_to_le32(hdr->version); + pp[2] = cpu_to_le32(hdr->first_rec_offset); + pp[3] = cpu_to_le32(hdr->tbl_size); + pp[4] = cpu_to_le32(hdr->checksum); +} + +static void +__decode_table_header_from_buf(struct ras_eeprom_table_header *hdr, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + + hdr->header = le32_to_cpu(pp[0]); + hdr->version = le32_to_cpu(pp[1]); + hdr->first_rec_offset = le32_to_cpu(pp[2]); + hdr->tbl_size = le32_to_cpu(pp[3]); + hdr->checksum = le32_to_cpu(pp[4]); +} + +static int __write_table_header(struct ras_eeprom_control *control) +{ + u8 buf[RAS_TABLE_HEADER_SIZE]; + struct ras_core_context *ras_core = to_ras_core_context(control); + int res; + + memset(buf, 0, sizeof(buf)); + __encode_table_header_to_buf(&control->tbl_hdr, buf); + + /* i2c may be unstable in gpu reset */ + res = __eeprom_write(ras_core, + control->i2c_address + + control->ras_header_offset, + buf, RAS_TABLE_HEADER_SIZE); + + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Failed to write EEPROM table header:%d\n", res); + } else if (res < RAS_TABLE_HEADER_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Short write:%d out of %d\n", res, RAS_TABLE_HEADER_SIZE); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +static void +__encode_table_ras_info_to_buf(struct ras_eeprom_table_ras_info *rai, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + u32 tmp; + + tmp = ((uint32_t)(rai->rma_status) & 0xFF) | + (((uint32_t)(rai->health_percent) << 8) & 0xFF00) | + (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000); + pp[0] = cpu_to_le32(tmp); +} + +static void +__decode_table_ras_info_from_buf(struct ras_eeprom_table_ras_info *rai, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + u32 tmp; + + tmp = le32_to_cpu(pp[0]); + rai->rma_status = tmp & 0xFF; + rai->health_percent = (tmp >> 8) & 0xFF; + rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF; +} + +static int __write_table_ras_info(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + u8 *buf; + int res; + + buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Failed to alloc buf to write table ras info\n"); + return -ENOMEM; + } + + __encode_table_ras_info_to_buf(&control->tbl_rai, buf); + + /* i2c may be unstable in gpu reset */ + res = __eeprom_write(ras_core, + control->i2c_address + + control->ras_info_offset, + buf, RAS_TABLE_V2_1_INFO_SIZE); + + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Failed to write EEPROM table ras info:%d\n", res); + } else if (res < RAS_TABLE_V2_1_INFO_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Short write:%d out of %d\n", res, RAS_TABLE_V2_1_INFO_SIZE); + res = -EIO; + } else { + res = 0; + } + + kfree(buf); + + return res; +} + +static u8 
__calc_hdr_byte_sum(const struct ras_eeprom_control *control) +{ + int ii; + u8 *pp, csum; + u32 sz; + + /* Header checksum, skip checksum field in the calculation */ + sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); + pp = (u8 *) &control->tbl_hdr; + csum = 0; + for (ii = 0; ii < sz; ii++, pp++) + csum += *pp; + + return csum; +} + +static u8 __calc_ras_info_byte_sum(const struct ras_eeprom_control *control) +{ + int ii; + u8 *pp, csum; + u32 sz; + + sz = sizeof(control->tbl_rai); + pp = (u8 *) &control->tbl_rai; + csum = 0; + for (ii = 0; ii < sz; ii++, pp++) + csum += *pp; + + return csum; +} + +static int ras_eeprom_correct_header_tag( + struct ras_eeprom_control *control, + uint32_t header) +{ + struct ras_eeprom_table_header *hdr = &control->tbl_hdr; + u8 *hh; + int res; + u8 csum; + + csum = -hdr->checksum; + + hh = (void *) &hdr->header; + csum -= (hh[0] + hh[1] + hh[2] + hh[3]); + hh = (void *) &header; + csum += hh[0] + hh[1] + hh[2] + hh[3]; + csum = -csum; + mutex_lock(&control->ras_tbl_mutex); + hdr->header = header; + hdr->checksum = csum; + res = __write_table_header(control); + mutex_unlock(&control->ras_tbl_mutex); + + return res; +} + +static void ras_set_eeprom_table_version(struct ras_eeprom_control *control) +{ + struct ras_eeprom_table_header *hdr = &control->tbl_hdr; + + hdr->version = RAS_TABLE_VER_V3; +} + +int ras_eeprom_reset_table(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + struct ras_eeprom_table_header *hdr = &control->tbl_hdr; + struct ras_eeprom_table_ras_info *rai = &control->tbl_rai; + u8 csum; + int res; + + mutex_lock(&control->ras_tbl_mutex); + + hdr->header = RAS_TABLE_HDR_VAL; + ras_set_eeprom_table_version(control); + + if (hdr->version >= RAS_TABLE_VER_V2_1) { + hdr->first_rec_offset = RAS_RECORD_START_V2_1; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE; + rai->rma_status = RAS_GPU_HEALTH_USABLE; + /** + * GPU health represented as a percentage. + * 0 means worst health, 100 means fully health. 
+ */ + rai->health_percent = 100; + /* ecc_page_threshold = 0 means disable bad page retirement */ + rai->ecc_page_threshold = control->record_threshold_count; + } else { + hdr->first_rec_offset = RAS_RECORD_START; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE; + } + + csum = __calc_hdr_byte_sum(control); + if (hdr->version >= RAS_TABLE_VER_V2_1) + csum += __calc_ras_info_byte_sum(control); + csum = -csum; + hdr->checksum = csum; + res = __write_table_header(control); + if (!res && hdr->version > RAS_TABLE_VER_V1) + res = __write_table_ras_info(control); + + control->ras_num_recs = 0; + control->ras_fri = 0; + + control->bad_channel_bitmap = 0; + ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM, + &control->ras_num_recs); + ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP, + &control->bad_channel_bitmap); + control->update_channel_flag = false; + + mutex_unlock(&control->ras_tbl_mutex); + + return res; +} + +static void +__encode_table_record_to_buf(struct ras_eeprom_control *control, + struct eeprom_umc_record *record, + unsigned char *buf) +{ + __le64 tmp = 0; + int i = 0; + + /* Next are all record fields according to EEPROM page spec in LE foramt */ + buf[i++] = record->err_type; + + buf[i++] = record->bank; + + tmp = cpu_to_le64(record->ts); + memcpy(buf + i, &tmp, 8); + i += 8; + + tmp = cpu_to_le64((record->offset & 0xffffffffffff)); + memcpy(buf + i, &tmp, 6); + i += 6; + + buf[i++] = record->mem_channel; + buf[i++] = record->mcumc_id; + + tmp = cpu_to_le64((record->retired_row_pfn & 0xffffffffffff)); + memcpy(buf + i, &tmp, 6); +} + +static void +__decode_table_record_from_buf(struct ras_eeprom_control *control, + struct eeprom_umc_record *record, + unsigned char *buf) +{ + __le64 tmp = 0; + int i = 0; + + /* Next are all record fields according to EEPROM page spec in LE foramt */ + record->err_type = buf[i++]; + + record->bank = buf[i++]; + + memcpy(&tmp, buf + i, 8); + record->ts = le64_to_cpu(tmp); + i += 8; + + memcpy(&tmp, buf + i, 6); + record->offset = (le64_to_cpu(tmp) & 0xffffffffffff); + i += 6; + + record->mem_channel = buf[i++]; + record->mcumc_id = buf[i++]; + + memcpy(&tmp, buf + i, 6); + record->retired_row_pfn = (le64_to_cpu(tmp) & 0xffffffffffff); +} + +bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + bool ret = false; + int bad_page_count; + + if (!__is_ras_eeprom_supported(ras_core) || + !control->record_threshold_config) + return false; + + bad_page_count = ras_umc_get_badpage_count(ras_core); + if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) { + if (bad_page_count > control->record_threshold_count) + RAS_DEV_WARN(ras_core->dev, "RAS records:%d exceed threshold:%d", + bad_page_count, control->record_threshold_count); + + if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) || + (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) { + RAS_DEV_WARN(ras_core->dev, + "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n"); + ret = false; + } else { + ras_core->is_rma = true; + RAS_DEV_WARN(ras_core->dev, + "Please consider adjusting the customized threshold.\n"); + ret = true; + } + } + + return ret; +} + +/** + * __ras_eeprom_write -- write indexed from buffer to EEPROM + * @control: pointer to control structure + * @buf: pointer to buffer containing data to write + * @fri: start writing at this index + * @num: number of records to write + * + * The caller must hold the table mutex 
in @control. + * Return 0 on success, -errno otherwise. + */ +static int __ras_eeprom_write(struct ras_eeprom_control *control, + u8 *buf, const u32 fri, const u32 num) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + u32 buf_size; + int res; + + /* i2c may be unstable in gpu reset */ + buf_size = num * RAS_TABLE_RECORD_SIZE; + res = __eeprom_write(ras_core, + control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri), + buf, buf_size); + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Writing %d EEPROM table records error:%d\n", num, res); + } else if (res < buf_size) { + /* Short write, return error.*/ + RAS_DEV_ERR(ras_core->dev, + "Wrote %d records out of %d\n", + (res/RAS_TABLE_RECORD_SIZE), num); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +static int ras_eeprom_append_table(struct ras_eeprom_control *control, + struct eeprom_umc_record *record, + const u32 num) +{ + u32 a, b, i; + u8 *buf, *pp; + int res; + + buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Encode all of them in one go. + */ + pp = buf; + for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) { + __encode_table_record_to_buf(control, &record[i], pp); + + /* update bad channel bitmap */ + if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) && + !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { + control->bad_channel_bitmap |= 1 << record[i].mem_channel; + control->update_channel_flag = true; + } + } + + /* a, first record index to write into. + * b, last record index to write into. + * a = first index to read (fri) + number of records in the table, + * b = a + @num - 1. + * Let N = control->ras_max_num_record_count, then we have, + * case 0: 0 <= a <= b < N, + * just append @num records starting at a; + * case 1: 0 <= a < N <= b, + * append (N - a) records starting at a, and + * append the remainder, b % N + 1, starting at 0. + * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases, + * case 2a: 0 <= a <= b < N + * append num records starting at a; and fix fri if b overwrote it, + * and since a <= b, if b overwrote it then a must've also, + * and if b didn't overwrite it, then a didn't also. + * case 2b: 0 <= b < a < N + * write num records starting at a, which wraps around 0=N + * and overwrite fri unconditionally. Now from case 2a, + * this means that b eclipsed fri to overwrite it and wrap + * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally + * set fri = b + 1 (mod N). + * Now, since fri is updated in every case, except the trivial case 0, + * the number of records present in the table after writing, is, + * num_recs - 1 = b - fri (mod N), and we take the positive value, + * by adding an arbitrary multiple of N before taking the modulo N + * as shown below. + */ + a = control->ras_fri + control->ras_num_recs; + b = a + num - 1; + if (b < control->ras_max_record_count) { + res = __ras_eeprom_write(control, buf, a, num); + } else if (a < control->ras_max_record_count) { + u32 g0, g1; + + g0 = control->ras_max_record_count - a; + g1 = b % control->ras_max_record_count + 1; + res = __ras_eeprom_write(control, buf, a, g0); + if (res) + goto Out; + res = __ras_eeprom_write(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, + 0, g1); + if (res) + goto Out; + if (g1 > control->ras_fri) + control->ras_fri = g1 % control->ras_max_record_count; + } else { + a %= control->ras_max_record_count; + b %= control->ras_max_record_count; + + if (a <= b) { + /* Note that, b - a + 1 = num. 
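+ *
+ * For illustration (values chosen arbitrarily): with
+ * ras_max_record_count = 8, ras_fri = 6 and ras_num_recs = 4,
+ * appending num = 3 gives a = 10 and b = 12; modulo 8 that is
+ * a = 2, b = 4 (case 2a), so the records land at indices 2..4,
+ * ras_fri stays 6 because b < ras_fri, and afterwards
+ * ras_num_recs = 1 + (8 + 4 - 6) % 8 = 7.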
*/ + res = __ras_eeprom_write(control, buf, a, num); + if (res) + goto Out; + if (b >= control->ras_fri) + control->ras_fri = (b + 1) % control->ras_max_record_count; + } else { + u32 g0, g1; + + /* b < a, which means, we write from + * a to the end of the table, and from + * the start of the table to b. + */ + g0 = control->ras_max_record_count - a; + g1 = b + 1; + res = __ras_eeprom_write(control, buf, a, g0); + if (res) + goto Out; + res = __ras_eeprom_write(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1); + if (res) + goto Out; + control->ras_fri = g1 % control->ras_max_record_count; + } + } + control->ras_num_recs = 1 + + (control->ras_max_record_count + b - control->ras_fri) + % control->ras_max_record_count; +Out: + kfree(buf); + return res; +} + +static int ras_eeprom_update_header(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + int threshold_config = control->record_threshold_config; + u8 *buf, *pp, csum; + u32 buf_size; + int bad_page_count; + int res; + + bad_page_count = ras_umc_get_badpage_count(ras_core); + /* Modify the header if it exceeds. + */ + if (threshold_config != 0 && + bad_page_count > control->record_threshold_count) { + RAS_DEV_WARN(ras_core->dev, + "Saved bad pages %d reaches threshold value %d\n", + bad_page_count, control->record_threshold_count); + control->tbl_hdr.header = RAS_TABLE_HDR_BAD; + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { + control->tbl_rai.rma_status = RAS_GPU_RETIRED__ECC_REACH_THRESHOLD; + control->tbl_rai.health_percent = 0; + } + + if ((threshold_config != WARN_NONSTOP_OVER_THRESHOLD) && + (threshold_config != NONSTOP_OVER_THRESHOLD)) + ras_core->is_rma = true; + + /* ignore the -ENOTSUPP return value */ + ras_core_event_notify(ras_core, RAS_EVENT_ID__DEVICE_RMA, NULL); + } + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + else + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + control->tbl_hdr.checksum = 0; + + buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "allocating memory for table of size %d bytes failed\n", + control->tbl_hdr.tbl_size); + res = -ENOMEM; + goto Out; + } + + res = __eeprom_read(ras_core, + control->i2c_address + + control->ras_record_offset, + buf, buf_size); + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "EEPROM failed reading records:%d\n", res); + goto Out; + } else if (res < buf_size) { + RAS_DEV_ERR(ras_core->dev, + "EEPROM read %d out of %d bytes\n", res, buf_size); + res = -EIO; + goto Out; + } + + /** + * bad page records have been stored in eeprom, + * now calculate gpu health percent + */ + if (threshold_config != 0 && + control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 && + bad_page_count <= control->record_threshold_count) + control->tbl_rai.health_percent = ((control->record_threshold_count - + bad_page_count) * 100) / control->record_threshold_count; + + /* Recalc the checksum. 
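+ * The invariant maintained here is that the byte sum of the whole
+ * table (header, V2_1 ras info when present, and all records) is
+ * zero modulo 256. As a made-up example, if every byte other than
+ * the checksum field sums to 0x2f3, the stored checksum becomes
+ * (u8)-0xf3 = 0x0d, so a later __verify_ras_table_checksum() sums
+ * the table back to zero.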
+ */ + csum = 0; + for (pp = buf; pp < buf + buf_size; pp++) + csum += *pp; + + csum += __calc_hdr_byte_sum(control); + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + csum += __calc_ras_info_byte_sum(control); + /* avoid sign extension when assigning to "checksum" */ + csum = -csum; + control->tbl_hdr.checksum = csum; + res = __write_table_header(control); + if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1) + res = __write_table_ras_info(control); +Out: + kfree(buf); + return res; +} + +/** + * ras_core_eeprom_append -- append records to the EEPROM RAS table + * @control: pointer to control structure + * @record: array of records to append + * @num: number of records in @record array + * + * Append @num records to the table, calculate the checksum and write + * the table back to EEPROM. The maximum number of records that + * can be appended is between 1 and control->ras_max_record_count, + * regardless of how many records are already stored in the table. + * + * Return 0 on success or if EEPROM is not supported, -errno on error. + */ +int ras_eeprom_append(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, const u32 num) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + int res; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (num == 0) { + RAS_DEV_ERR(ras_core->dev, "will not append 0 records\n"); + return -EINVAL; + } else if ((num + control->ras_num_recs) > control->ras_max_record_count) { + RAS_DEV_ERR(ras_core->dev, + "cannot append %d records than the size of table %d\n", + num, control->ras_max_record_count); + return -EINVAL; + } + + mutex_lock(&control->ras_tbl_mutex); + res = ras_eeprom_append_table(control, record, num); + if (!res) + res = ras_eeprom_update_header(control); + + mutex_unlock(&control->ras_tbl_mutex); + + return res; +} + +/** + * __ras_eeprom_read -- read indexed from EEPROM into buffer + * @control: pointer to control structure + * @buf: pointer to buffer to read into + * @fri: first record index, start reading at this index, absolute index + * @num: number of records to read + * + * The caller must hold the table mutex in @control. + * Return 0 on success, -errno otherwise. + */ +static int __ras_eeprom_read(struct ras_eeprom_control *control, + u8 *buf, const u32 fri, const u32 num) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + u32 buf_size; + int res; + + /* i2c may be unstable in gpu reset */ + buf_size = num * RAS_TABLE_RECORD_SIZE; + res = __eeprom_read(ras_core, + control->i2c_address + + RAS_INDEX_TO_OFFSET(control, fri), + buf, buf_size); + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Reading %d EEPROM table records error:%d\n", num, res); + } else if (res < buf_size) { + /* Short read, return error. 
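+ * (__eeprom_read() reports the number of bytes transferred on
+ * success, so any non-negative value smaller than buf_size is
+ * treated as a partial transfer here.)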
+ */ + RAS_DEV_ERR(ras_core->dev, + "Read %d records out of %d\n", + (res/RAS_TABLE_RECORD_SIZE), num); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +int ras_eeprom_read(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, const u32 num) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + int i, res; + u8 *buf, *pp; + u32 g0, g1; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (num == 0) { + RAS_DEV_ERR(ras_core->dev, "will not read 0 records\n"); + return -EINVAL; + } else if (num > control->ras_num_recs) { + RAS_DEV_ERR(ras_core->dev, + "too many records to read:%d available:%d\n", + num, control->ras_num_recs); + return -EINVAL; + } + + buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Determine how many records to read, from the first record + * index, fri, to the end of the table, and from the beginning + * of the table, such that the total number of records is + * @num, and we handle wrap around when fri > 0 and + * fri + num > RAS_MAX_RECORD_COUNT. + * + * First we compute the index of the last element + * which would be fetched from each region, + * g0 is in [fri, fri + num - 1], and + * g1 is in [0, RAS_MAX_RECORD_COUNT - 1]. + * Then, if g0 < RAS_MAX_RECORD_COUNT, the index of + * the last element to fetch, we set g0 to _the number_ + * of elements to fetch, @num, since we know that the last + * indexed to be fetched does not exceed the table. + * + * If, however, g0 >= RAS_MAX_RECORD_COUNT, then + * we set g0 to the number of elements to read + * until the end of the table, and g1 to the number of + * elements to read from the beginning of the table. + */ + g0 = control->ras_fri + num - 1; + g1 = g0 % control->ras_max_record_count; + if (g0 < control->ras_max_record_count) { + g0 = num; + g1 = 0; + } else { + g0 = control->ras_max_record_count - control->ras_fri; + g1 += 1; + } + + mutex_lock(&control->ras_tbl_mutex); + res = __ras_eeprom_read(control, buf, control->ras_fri, g0); + if (res) + goto Out; + if (g1) { + res = __ras_eeprom_read(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1); + if (res) + goto Out; + } + + res = 0; + + /* Read up everything? Then transform. + */ + pp = buf; + for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) { + __decode_table_record_from_buf(control, &record[i], pp); + + /* update bad channel bitmap */ + if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) && + !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { + control->bad_channel_bitmap |= 1 << record[i].mem_channel; + control->update_channel_flag = true; + } + } +Out: + kfree(buf); + mutex_unlock(&control->ras_tbl_mutex); + + return res; +} + +uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + + /* get available eeprom table version first before eeprom table init */ + ras_set_eeprom_table_version(control); + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + return RAS_MAX_RECORD_COUNT_V2_1; + else + return RAS_MAX_RECORD_COUNT; +} + +/** + * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum + * @control: pointer to control structure + * + * Check the checksum of the stored in EEPROM RAS table. + * + * Return 0 if the checksum is correct, + * positive if it is not correct, and + * -errno on I/O error. 
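+ *
+ * A well-formed table sums to zero because the header checksum is
+ * written as the negated byte sum of everything else (see
+ * ras_eeprom_update_header()); a positive return value here is
+ * therefore the residual byte sum of a corrupted table.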
+ */ +static int __verify_ras_table_checksum(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + int buf_size, res; + u8 csum, *buf, *pp; + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + buf_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + else + buf_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Out of memory checking RAS table checksum.\n"); + return -ENOMEM; + } + + res = __eeprom_read(ras_core, + control->i2c_address + + control->ras_header_offset, + buf, buf_size); + if (res < buf_size) { + RAS_DEV_ERR(ras_core->dev, + "Partial read for checksum, res:%d\n", res); + /* On partial reads, return -EIO. + */ + if (res >= 0) + res = -EIO; + goto Out; + } + + csum = 0; + for (pp = buf; pp < buf + buf_size; pp++) + csum += *pp; +Out: + kfree(buf); + return res < 0 ? res : csum; +} + +static int __read_table_ras_info(struct ras_eeprom_control *control) +{ + struct ras_eeprom_table_ras_info *rai = &control->tbl_rai; + struct ras_core_context *ras_core = to_ras_core_context(control); + unsigned char *buf; + int res; + + buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Failed to alloc buf to read EEPROM table ras info\n"); + return -ENOMEM; + } + + /** + * EEPROM table V2_1 supports ras info, + * read EEPROM table ras info + */ + res = __eeprom_read(ras_core, + control->i2c_address + control->ras_info_offset, + buf, RAS_TABLE_V2_1_INFO_SIZE); + if (res < RAS_TABLE_V2_1_INFO_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Failed to read EEPROM table ras info, res:%d\n", res); + res = res >= 0 ? -EIO : res; + goto Out; + } + + __decode_table_ras_info_from_buf(rai, buf); + +Out: + kfree(buf); + return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res; +} + +static int __check_ras_table_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 }; + struct ras_eeprom_table_header *hdr; + int res; + + hdr = &control->tbl_hdr; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (!__get_eeprom_i2c_addr(ras_core, control)) + return -EINVAL; + + control->ras_header_offset = RAS_HDR_START; + control->ras_info_offset = RAS_TABLE_V2_1_INFO_START; + mutex_init(&control->ras_tbl_mutex); + + /* Read the table header from EEPROM address */ + res = __eeprom_read(ras_core, + control->i2c_address + control->ras_header_offset, + buf, RAS_TABLE_HEADER_SIZE); + if (res < RAS_TABLE_HEADER_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Failed to read EEPROM table header, res:%d\n", res); + return res >= 0 ? 
-EIO : res; + } + + __decode_table_header_from_buf(hdr, buf); + + if (hdr->header != RAS_TABLE_HDR_VAL && + hdr->header != RAS_TABLE_HDR_BAD) { + RAS_DEV_INFO(ras_core->dev, "Creating a new EEPROM table"); + return ras_eeprom_reset_table(ras_core); + } + + switch (hdr->version) { + case RAS_TABLE_VER_V2_1: + case RAS_TABLE_VER_V3: + control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr); + control->ras_record_offset = RAS_RECORD_START_V2_1; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1; + break; + case RAS_TABLE_VER_V1: + control->ras_num_recs = RAS_NUM_RECS(hdr); + control->ras_record_offset = RAS_RECORD_START; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + break; + default: + RAS_DEV_ERR(ras_core->dev, + "RAS header invalid, unsupported version: %u", + hdr->version); + return -EINVAL; + } + + if (control->ras_num_recs > control->ras_max_record_count) { + RAS_DEV_ERR(ras_core->dev, + "RAS header invalid, records in header: %u max allowed :%u", + control->ras_num_recs, control->ras_max_record_count); + return -EINVAL; + } + + control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset); + + return 0; +} + +int ras_eeprom_check_storage_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + struct ras_eeprom_table_header *hdr; + int bad_page_count; + int res = 0; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (!__get_eeprom_i2c_addr(ras_core, control)) + return -EINVAL; + + hdr = &control->tbl_hdr; + + bad_page_count = ras_umc_get_badpage_count(ras_core); + if (hdr->header == RAS_TABLE_HDR_VAL) { + RAS_DEV_INFO(ras_core->dev, + "Found existing EEPROM table with %d records\n", + bad_page_count); + + if (hdr->version >= RAS_TABLE_VER_V2_1) { + res = __read_table_ras_info(control); + if (res) + return res; + } + + res = __verify_ras_table_checksum(control); + if (res) + RAS_DEV_ERR(ras_core->dev, + "RAS table incorrect checksum or error:%d\n", res); + + /* Warn if we are at 90% of the threshold or above + */ + if (10 * bad_page_count >= 9 * control->record_threshold_count) + RAS_DEV_WARN(ras_core->dev, + "RAS records:%u exceeds 90%% of threshold:%d\n", + bad_page_count, + control->record_threshold_count); + + } else if (hdr->header == RAS_TABLE_HDR_BAD && + control->record_threshold_config != 0) { + if (hdr->version >= RAS_TABLE_VER_V2_1) { + res = __read_table_ras_info(control); + if (res) + return res; + } + + res = __verify_ras_table_checksum(control); + if (res) + RAS_DEV_ERR(ras_core->dev, + "RAS Table incorrect checksum or error:%d\n", res); + + if (control->record_threshold_count >= bad_page_count) { + /* This means that, the threshold was increased since + * the last time the system was booted, and now, + * ras->record_threshold_count - control->num_recs > 0, + * so that at least one more record can be saved, + * before the page count threshold is reached. 
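+ *
+ * For example, if an earlier boot marked the table bad while the
+ * threshold was 10 and the threshold is now configured as, say, 32
+ * with only 10 bad pages recorded, the header signature is restored
+ * to RAS_TABLE_HDR_VAL below and page retirement can continue.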
+ */ + RAS_DEV_INFO(ras_core->dev, + "records:%d threshold:%d, resetting RAS table header signature", + bad_page_count, + control->record_threshold_count); + res = ras_eeprom_correct_header_tag(control, RAS_TABLE_HDR_VAL); + } else { + RAS_DEV_ERR(ras_core->dev, "RAS records:%d exceed threshold:%d", + bad_page_count, control->record_threshold_count); + if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) || + (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) { + RAS_DEV_WARN(ras_core->dev, + "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n"); + res = 0; + } else { + ras_core->is_rma = true; + RAS_DEV_ERR(ras_core->dev, + "User defined threshold is set, runtime service will be halt when threshold is reached\n"); + } + } + } + + return res < 0 ? res : 0; +} + +int ras_eeprom_hw_init(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control; + struct ras_eeprom_config *eeprom_cfg; + + if (!ras_core) + return -EINVAL; + + ras_core->is_rma = false; + + control = &ras_core->ras_eeprom; + + memset(control, 0, sizeof(*control)); + + eeprom_cfg = &ras_core->config->eeprom_cfg; + control->record_threshold_config = + eeprom_cfg->eeprom_record_threshold_config; + + control->record_threshold_count = ras_eeprom_max_record_count(ras_core); + if (eeprom_cfg->eeprom_record_threshold_count < + control->record_threshold_count) + control->record_threshold_count = + eeprom_cfg->eeprom_record_threshold_count; + + control->sys_func = eeprom_cfg->eeprom_sys_fn; + control->max_read_len = eeprom_cfg->max_i2c_read_len; + control->max_write_len = eeprom_cfg->max_i2c_write_len; + control->i2c_adapter = eeprom_cfg->eeprom_i2c_adapter; + control->i2c_port = eeprom_cfg->eeprom_i2c_port; + control->i2c_address = eeprom_cfg->eeprom_i2c_addr; + + control->update_channel_flag = false; + + return __check_ras_table_status(ras_core); +} + +int ras_eeprom_hw_fini(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control; + + if (!ras_core) + return -EINVAL; + + control = &ras_core->ras_eeprom; + mutex_destroy(&control->ras_tbl_mutex); + + return 0; +} + +uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core) +{ + if (!ras_core) + return 0; + + return ras_core->ras_eeprom.ras_num_recs; +} + +void ras_eeprom_sync_info(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control; + + if (!ras_core) + return; + + control = &ras_core->ras_eeprom; + ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM, + &control->ras_num_recs); + ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP, + &control->bad_channel_bitmap); +} + +enum ras_gpu_health_status + ras_eeprom_check_gpu_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + struct ras_eeprom_table_ras_info *rai = &control->tbl_rai; + + if (!__is_ras_eeprom_supported(ras_core) || + !control->record_threshold_config) + return RAS_GPU_HEALTH_NONE; + + if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) + return RAS_GPU_IN_BAD_STATUS; + + return rai->rma_status; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h new file mode 100644 index 000000000000..2abe566c18b6 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_EEPROM_H__ +#define __RAS_EEPROM_H__ +#include "ras_sys.h" + +#define RAS_TABLE_VER_V1 0x00010000 +#define RAS_TABLE_VER_V2_1 0x00021000 +#define RAS_TABLE_VER_V3 0x00030000 + +#define NONSTOP_OVER_THRESHOLD -2 +#define WARN_NONSTOP_OVER_THRESHOLD -1 +#define DISABLE_RETIRE_PAGE 0 + +/* + * Bad address pfn : eeprom_umc_record.retired_row_pfn[39:0], + * nps mode: eeprom_umc_record.retired_row_pfn[47:40] + */ +#define EEPROM_RECORD_UMC_ADDR_MASK 0xFFFFFFFFFFULL +#define EEPROM_RECORD_UMC_NPS_MASK 0xFF0000000000ULL +#define EEPROM_RECORD_UMC_NPS_SHIFT 40 + +#define EEPROM_RECORD_UMC_NPS_MODE(RECORD) \ + (((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_NPS_MASK) >> \ + EEPROM_RECORD_UMC_NPS_SHIFT) + +#define EEPROM_RECORD_UMC_ADDR_PFN(RECORD) \ + ((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_ADDR_MASK) + +#define EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(RECORD, ADDR, NPS) \ +do { \ + uint64_t tmp = (NPS); \ + tmp = ((tmp << EEPROM_RECORD_UMC_NPS_SHIFT) & EEPROM_RECORD_UMC_NPS_MASK); \ + tmp |= (ADDR) & EEPROM_RECORD_UMC_ADDR_MASK; \ + (RECORD)->retired_row_pfn = tmp; \ +} while (0) + +enum ras_gpu_health_status { + RAS_GPU_HEALTH_NONE = 0, + RAS_GPU_HEALTH_USABLE = 1, + RAS_GPU_RETIRED__ECC_REACH_THRESHOLD = 2, + RAS_GPU_IN_BAD_STATUS = 3, +}; + +enum ras_eeprom_err_type { + RAS_EEPROM_ERR_NA, + RAS_EEPROM_ERR_RECOVERABLE, + RAS_EEPROM_ERR_NON_RECOVERABLE, + RAS_EEPROM_ERR_COUNT, +}; + +struct ras_eeprom_table_header { + uint32_t header; + uint32_t version; + uint32_t first_rec_offset; + uint32_t tbl_size; + uint32_t checksum; +} __packed; + +struct ras_eeprom_table_ras_info { + u8 rma_status; + u8 health_percent; + u16 ecc_page_threshold; + u32 padding[64 - 1]; +} __packed; + +struct ras_eeprom_control { + struct ras_eeprom_table_header tbl_hdr; + struct ras_eeprom_table_ras_info tbl_rai; + + /* record threshold */ + int record_threshold_config; + uint32_t record_threshold_count; + bool update_channel_flag; + + const struct ras_eeprom_sys_func *sys_func; + void *i2c_adapter; + u32 i2c_port; + u16 max_read_len; + u16 max_write_len; + + /* Base I2C EEPPROM 19-bit memory address, + * where the table is located. For more information, + * see top of amdgpu_eeprom.c. + */ + u32 i2c_address; + + /* The byte offset off of @i2c_address + * where the table header is found, + * and where the records start--always + * right after the header. 
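+ * (For V2_1 and newer tables the per-table ras info block is
+ * expected to sit between the header and the first record, which
+ * is why @ras_info_offset and @ras_record_offset are tracked
+ * separately from @ras_header_offset.)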
+ */ + u32 ras_header_offset; + u32 ras_info_offset; + u32 ras_record_offset; + + /* Number of records in the table. + */ + u32 ras_num_recs; + + /* First record index to read, 0-based. + * Range is [0, num_recs-1]. This is + * an absolute index, starting right after + * the table header. + */ + u32 ras_fri; + + /* Maximum possible number of records + * we could store, i.e. the maximum capacity + * of the table. + */ + u32 ras_max_record_count; + + /* Protect table access via this mutex. + */ + struct mutex ras_tbl_mutex; + + /* Record channel info which occurred bad pages + */ + u32 bad_channel_bitmap; +}; + +/* + * Represents single table record. Packed to be easily serialized into byte + * stream. + */ +struct eeprom_umc_record { + + union { + uint64_t address; + uint64_t offset; + }; + + uint64_t retired_row_pfn; + uint64_t ts; + + enum ras_eeprom_err_type err_type; + + union { + unsigned char bank; + unsigned char cu; + }; + + unsigned char mem_channel; + unsigned char mcumc_id; + + /* The following variables will not be saved to eeprom. + */ + uint64_t cur_nps_retired_row_pfn; + uint32_t cur_nps_bank; + uint32_t cur_nps; +}; + +struct ras_core_context; +int ras_eeprom_hw_init(struct ras_core_context *ras_core); +int ras_eeprom_hw_fini(struct ras_core_context *ras_core); + +int ras_eeprom_reset_table(struct ras_core_context *ras_core); + +bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core); + +int ras_eeprom_read(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, const u32 num); + +int ras_eeprom_append(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, const u32 num); + +uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core); +uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core); +void ras_eeprom_sync_info(struct ras_core_context *ras_core); + +int ras_eeprom_check_storage_status(struct ras_core_context *ras_core); +enum ras_gpu_health_status + ras_eeprom_check_gpu_status(struct ras_core_context *ras_core); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c new file mode 100644 index 000000000000..f5ce28777705 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "ras.h" +#include "ras_gfx_v9_0.h" +#include "ras_gfx.h" +#include "ras_core_status.h" + +static const struct ras_gfx_ip_func *ras_gfx_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + case IP_VERSION(9, 5, 0): + return &gfx_ras_func_v9_0; + default: + RAS_DEV_ERR(ras_core->dev, + "GFX ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock) +{ + struct ras_gfx *gfx = &ras_core->ras_gfx; + + return gfx->ip_func->get_ta_subblock(ras_core, + error_type, subblock, ta_subblock); +} + +int ras_gfx_hw_init(struct ras_core_context *ras_core) +{ + struct ras_gfx *gfx = &ras_core->ras_gfx; + + gfx->gfx_ip_version = ras_core->config->gfx_ip_version; + + gfx->ip_func = ras_gfx_get_ip_funcs(ras_core, gfx->gfx_ip_version); + + return gfx->ip_func ? RAS_CORE_OK : -EINVAL; +} + +int ras_gfx_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h new file mode 100644 index 000000000000..8a42d69fb0ad --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_GFX_H__ +#define __RAS_GFX_H__ + +struct ras_gfx_ip_func { + int (*get_ta_subblock)(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock); +}; + +struct ras_gfx { + uint32_t gfx_ip_version; + const struct ras_gfx_ip_func *ip_func; +}; + +int ras_gfx_hw_init(struct ras_core_context *ras_core); +int ras_gfx_hw_fini(struct ras_core_context *ras_core); + +int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock); + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c new file mode 100644 index 000000000000..6213d3f125be --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_gfx_v9_0.h" +#include "ras_core_status.h" + +enum ta_gfx_v9_subblock { + /*CPC*/ + TA_GFX_V9__GFX_CPC_INDEX_START = 0, + TA_GFX_V9__GFX_CPC_SCRATCH = TA_GFX_V9__GFX_CPC_INDEX_START, + TA_GFX_V9__GFX_CPC_UCODE, + TA_GFX_V9__GFX_DC_STATE_ME1, + TA_GFX_V9__GFX_DC_CSINVOC_ME1, + TA_GFX_V9__GFX_DC_RESTORE_ME1, + TA_GFX_V9__GFX_DC_STATE_ME2, + TA_GFX_V9__GFX_DC_CSINVOC_ME2, + TA_GFX_V9__GFX_DC_RESTORE_ME2, + TA_GFX_V9__GFX_CPC_INDEX_END = TA_GFX_V9__GFX_DC_RESTORE_ME2, + /* CPF*/ + TA_GFX_V9__GFX_CPF_INDEX_START, + TA_GFX_V9__GFX_CPF_ROQ_ME2 = TA_GFX_V9__GFX_CPF_INDEX_START, + TA_GFX_V9__GFX_CPF_ROQ_ME1, + TA_GFX_V9__GFX_CPF_TAG, + TA_GFX_V9__GFX_CPF_INDEX_END = TA_GFX_V9__GFX_CPF_TAG, + /* CPG*/ + TA_GFX_V9__GFX_CPG_INDEX_START, + TA_GFX_V9__GFX_CPG_DMA_ROQ = TA_GFX_V9__GFX_CPG_INDEX_START, + TA_GFX_V9__GFX_CPG_DMA_TAG, + TA_GFX_V9__GFX_CPG_TAG, + TA_GFX_V9__GFX_CPG_INDEX_END = TA_GFX_V9__GFX_CPG_TAG, + /* GDS*/ + TA_GFX_V9__GFX_GDS_INDEX_START, + TA_GFX_V9__GFX_GDS_MEM = TA_GFX_V9__GFX_GDS_INDEX_START, + TA_GFX_V9__GFX_GDS_INPUT_QUEUE, + TA_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM, + TA_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM, + TA_GFX_V9__GFX_GDS_OA_PIPE_MEM, + TA_GFX_V9__GFX_GDS_INDEX_END = TA_GFX_V9__GFX_GDS_OA_PIPE_MEM, + /* SPI*/ + TA_GFX_V9__GFX_SPI_SR_MEM, + /* SQ*/ + TA_GFX_V9__GFX_SQ_INDEX_START, + TA_GFX_V9__GFX_SQ_SGPR = TA_GFX_V9__GFX_SQ_INDEX_START, + TA_GFX_V9__GFX_SQ_LDS_D, + TA_GFX_V9__GFX_SQ_LDS_I, + TA_GFX_V9__GFX_SQ_VGPR, /* VGPR = SP*/ + TA_GFX_V9__GFX_SQ_INDEX_END = TA_GFX_V9__GFX_SQ_VGPR, + /* SQC (3 ranges)*/ + TA_GFX_V9__GFX_SQC_INDEX_START, + /* SQC range 0*/ + TA_GFX_V9__GFX_SQC_INDEX0_START = TA_GFX_V9__GFX_SQC_INDEX_START, + TA_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO = + TA_GFX_V9__GFX_SQC_INDEX0_START, + TA_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO, + TA_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO, + TA_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + TA_GFX_V9__GFX_SQC_INDEX0_END = + TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + /* SQC range 1*/ + TA_GFX_V9__GFX_SQC_INDEX1_START, + TA_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM = + TA_GFX_V9__GFX_SQC_INDEX1_START, + TA_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM, + 
TA_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX1_END = + TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + /* SQC range 2*/ + TA_GFX_V9__GFX_SQC_INDEX2_START, + TA_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM = + TA_GFX_V9__GFX_SQC_INDEX2_START, + TA_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX2_END = + TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX_END = TA_GFX_V9__GFX_SQC_INDEX2_END, + /* TA*/ + TA_GFX_V9__GFX_TA_INDEX_START, + TA_GFX_V9__GFX_TA_FS_DFIFO = TA_GFX_V9__GFX_TA_INDEX_START, + TA_GFX_V9__GFX_TA_FS_AFIFO, + TA_GFX_V9__GFX_TA_FL_LFIFO, + TA_GFX_V9__GFX_TA_FX_LFIFO, + TA_GFX_V9__GFX_TA_FS_CFIFO, + TA_GFX_V9__GFX_TA_INDEX_END = TA_GFX_V9__GFX_TA_FS_CFIFO, + /* TCA*/ + TA_GFX_V9__GFX_TCA_INDEX_START, + TA_GFX_V9__GFX_TCA_HOLE_FIFO = TA_GFX_V9__GFX_TCA_INDEX_START, + TA_GFX_V9__GFX_TCA_REQ_FIFO, + TA_GFX_V9__GFX_TCA_INDEX_END = TA_GFX_V9__GFX_TCA_REQ_FIFO, + /* TCC (5 sub-ranges)*/ + TA_GFX_V9__GFX_TCC_INDEX_START, + /* TCC range 0*/ + TA_GFX_V9__GFX_TCC_INDEX0_START = TA_GFX_V9__GFX_TCC_INDEX_START, + TA_GFX_V9__GFX_TCC_CACHE_DATA = TA_GFX_V9__GFX_TCC_INDEX0_START, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1, + TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0, + TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1, + TA_GFX_V9__GFX_TCC_HIGH_RATE_TAG, + TA_GFX_V9__GFX_TCC_LOW_RATE_TAG, + TA_GFX_V9__GFX_TCC_INDEX0_END = TA_GFX_V9__GFX_TCC_LOW_RATE_TAG, + /* TCC range 1*/ + TA_GFX_V9__GFX_TCC_INDEX1_START, + TA_GFX_V9__GFX_TCC_IN_USE_DEC = TA_GFX_V9__GFX_TCC_INDEX1_START, + TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + TA_GFX_V9__GFX_TCC_INDEX1_END = + TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + /* TCC range 2*/ + TA_GFX_V9__GFX_TCC_INDEX2_START, + TA_GFX_V9__GFX_TCC_RETURN_DATA = TA_GFX_V9__GFX_TCC_INDEX2_START, + TA_GFX_V9__GFX_TCC_RETURN_CONTROL, + TA_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO, + TA_GFX_V9__GFX_TCC_WRITE_RETURN, + TA_GFX_V9__GFX_TCC_WRITE_CACHE_READ, + TA_GFX_V9__GFX_TCC_SRC_FIFO, + TA_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM, + TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + TA_GFX_V9__GFX_TCC_INDEX2_END = + TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + /* TCC range 3*/ + TA_GFX_V9__GFX_TCC_INDEX3_START, + TA_GFX_V9__GFX_TCC_LATENCY_FIFO = TA_GFX_V9__GFX_TCC_INDEX3_START, + TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + TA_GFX_V9__GFX_TCC_INDEX3_END = + TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + /* TCC range 4*/ + TA_GFX_V9__GFX_TCC_INDEX4_START, + TA_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN = + TA_GFX_V9__GFX_TCC_INDEX4_START, + TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + TA_GFX_V9__GFX_TCC_INDEX4_END = + TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + TA_GFX_V9__GFX_TCC_INDEX_END = TA_GFX_V9__GFX_TCC_INDEX4_END, + /* TCI*/ + TA_GFX_V9__GFX_TCI_WRITE_RAM, + /* TCP*/ + TA_GFX_V9__GFX_TCP_INDEX_START, + TA_GFX_V9__GFX_TCP_CACHE_RAM = TA_GFX_V9__GFX_TCP_INDEX_START, + TA_GFX_V9__GFX_TCP_LFIFO_RAM, + TA_GFX_V9__GFX_TCP_CMD_FIFO, + TA_GFX_V9__GFX_TCP_VM_FIFO, + TA_GFX_V9__GFX_TCP_DB_RAM, + TA_GFX_V9__GFX_TCP_UTCL1_LFIFO0, + TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + TA_GFX_V9__GFX_TCP_INDEX_END = 
TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + /* TD*/ + TA_GFX_V9__GFX_TD_INDEX_START, + TA_GFX_V9__GFX_TD_SS_FIFO_LO = TA_GFX_V9__GFX_TD_INDEX_START, + TA_GFX_V9__GFX_TD_SS_FIFO_HI, + TA_GFX_V9__GFX_TD_CS_FIFO, + TA_GFX_V9__GFX_TD_INDEX_END = TA_GFX_V9__GFX_TD_CS_FIFO, + /* EA (3 sub-ranges)*/ + TA_GFX_V9__GFX_EA_INDEX_START, + /* EA range 0*/ + TA_GFX_V9__GFX_EA_INDEX0_START = TA_GFX_V9__GFX_EA_INDEX_START, + TA_GFX_V9__GFX_EA_DRAMRD_CMDMEM = TA_GFX_V9__GFX_EA_INDEX0_START, + TA_GFX_V9__GFX_EA_DRAMWR_CMDMEM, + TA_GFX_V9__GFX_EA_DRAMWR_DATAMEM, + TA_GFX_V9__GFX_EA_RRET_TAGMEM, + TA_GFX_V9__GFX_EA_WRET_TAGMEM, + TA_GFX_V9__GFX_EA_GMIRD_CMDMEM, + TA_GFX_V9__GFX_EA_GMIWR_CMDMEM, + TA_GFX_V9__GFX_EA_GMIWR_DATAMEM, + TA_GFX_V9__GFX_EA_INDEX0_END = TA_GFX_V9__GFX_EA_GMIWR_DATAMEM, + /* EA range 1*/ + TA_GFX_V9__GFX_EA_INDEX1_START, + TA_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = TA_GFX_V9__GFX_EA_INDEX1_START, + TA_GFX_V9__GFX_EA_DRAMWR_PAGEMEM, + TA_GFX_V9__GFX_EA_IORD_CMDMEM, + TA_GFX_V9__GFX_EA_IOWR_CMDMEM, + TA_GFX_V9__GFX_EA_IOWR_DATAMEM, + TA_GFX_V9__GFX_EA_GMIRD_PAGEMEM, + TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + TA_GFX_V9__GFX_EA_INDEX1_END = TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + /* EA range 2*/ + TA_GFX_V9__GFX_EA_INDEX2_START, + TA_GFX_V9__GFX_EA_MAM_D0MEM = TA_GFX_V9__GFX_EA_INDEX2_START, + TA_GFX_V9__GFX_EA_MAM_D1MEM, + TA_GFX_V9__GFX_EA_MAM_D2MEM, + TA_GFX_V9__GFX_EA_MAM_D3MEM, + TA_GFX_V9__GFX_EA_INDEX2_END = TA_GFX_V9__GFX_EA_MAM_D3MEM, + TA_GFX_V9__GFX_EA_INDEX_END = TA_GFX_V9__GFX_EA_INDEX2_END, + /* UTC VM L2 bank*/ + TA_GFX_V9__UTC_VML2_BANK_CACHE, + /* UTC VM walker*/ + TA_GFX_V9__UTC_VML2_WALKER, + /* UTC ATC L2 2MB cache*/ + TA_GFX_V9__UTC_ATCL2_CACHE_2M_BANK, + /* UTC ATC L2 4KB cache*/ + TA_GFX_V9__UTC_ATCL2_CACHE_4K_BANK, + TA_GFX_V9__GFX_MAX +}; + +struct ras_gfx_subblock_t { + unsigned char *name; + int ta_subblock; + int hw_supported_error_type; + int sw_supported_error_type; +}; + +#define RAS_GFX_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \ + [RAS_GFX_V9__##subblock] = { \ + #subblock, \ + TA_GFX_V9__##subblock, \ + ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \ + (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \ + } + +const struct ras_gfx_subblock_t ras_gfx_v9_0_subblocks[] = { + RAS_GFX_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0), + 
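+	/*
+	 * Each (a..d) tuple above is packed by RAS_GFX_SUB_BLOCK() into
+	 * hw_supported_error_type and (e..h) into sw_supported_error_type;
+	 * e.g. the GFX_CPF_TAG entry expands to
+	 * hw_supported_error_type = 0xe and sw_supported_error_type = 0x6.
+	 */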
RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0, + 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0, + 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0), + 
RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0), +}; + +static int gfx_v9_0_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock) +{ + const struct ras_gfx_subblock_t *gfx_subblock; + + if (subblock >= ARRAY_SIZE(ras_gfx_v9_0_subblocks)) + return -EINVAL; + + gfx_subblock = &ras_gfx_v9_0_subblocks[subblock]; + if (!gfx_subblock->name) + return -EPERM; + + if (!(gfx_subblock->hw_supported_error_type & error_type)) { + RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, hardware do not support type 0x%x\n", + gfx_subblock->name, error_type); + return -EPERM; + } + + if (!(gfx_subblock->sw_supported_error_type & error_type)) { + RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, driver do not support type 0x%x\n", + gfx_subblock->name, 
error_type); + return -EPERM; + } + + *ta_subblock = gfx_subblock->ta_subblock; + + return 0; +} + +const struct ras_gfx_ip_func gfx_ras_func_v9_0 = { + .get_ta_subblock = gfx_v9_0_get_ta_subblock, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h new file mode 100644 index 000000000000..659b56619747 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_GFX_V9_0_H__ +#define __RAS_GFX_V9_0_H__ + +enum ras_gfx_v9_subblock { + /* CPC */ + RAS_GFX_V9__GFX_CPC_INDEX_START = 0, + RAS_GFX_V9__GFX_CPC_SCRATCH = + RAS_GFX_V9__GFX_CPC_INDEX_START, + RAS_GFX_V9__GFX_CPC_UCODE, + RAS_GFX_V9__GFX_DC_STATE_ME1, + RAS_GFX_V9__GFX_DC_CSINVOC_ME1, + RAS_GFX_V9__GFX_DC_RESTORE_ME1, + RAS_GFX_V9__GFX_DC_STATE_ME2, + RAS_GFX_V9__GFX_DC_CSINVOC_ME2, + RAS_GFX_V9__GFX_DC_RESTORE_ME2, + RAS_GFX_V9__GFX_CPC_INDEX_END = + RAS_GFX_V9__GFX_DC_RESTORE_ME2, + /* CPF */ + RAS_GFX_V9__GFX_CPF_INDEX_START, + RAS_GFX_V9__GFX_CPF_ROQ_ME2 = + RAS_GFX_V9__GFX_CPF_INDEX_START, + RAS_GFX_V9__GFX_CPF_ROQ_ME1, + RAS_GFX_V9__GFX_CPF_TAG, + RAS_GFX_V9__GFX_CPF_INDEX_END = RAS_GFX_V9__GFX_CPF_TAG, + /* CPG */ + RAS_GFX_V9__GFX_CPG_INDEX_START, + RAS_GFX_V9__GFX_CPG_DMA_ROQ = + RAS_GFX_V9__GFX_CPG_INDEX_START, + RAS_GFX_V9__GFX_CPG_DMA_TAG, + RAS_GFX_V9__GFX_CPG_TAG, + RAS_GFX_V9__GFX_CPG_INDEX_END = RAS_GFX_V9__GFX_CPG_TAG, + /* GDS */ + RAS_GFX_V9__GFX_GDS_INDEX_START, + RAS_GFX_V9__GFX_GDS_MEM = RAS_GFX_V9__GFX_GDS_INDEX_START, + RAS_GFX_V9__GFX_GDS_INPUT_QUEUE, + RAS_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM, + RAS_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM, + RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM, + RAS_GFX_V9__GFX_GDS_INDEX_END = + RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM, + /* SPI */ + RAS_GFX_V9__GFX_SPI_SR_MEM, + /* SQ */ + RAS_GFX_V9__GFX_SQ_INDEX_START, + RAS_GFX_V9__GFX_SQ_SGPR = RAS_GFX_V9__GFX_SQ_INDEX_START, + RAS_GFX_V9__GFX_SQ_LDS_D, + RAS_GFX_V9__GFX_SQ_LDS_I, + RAS_GFX_V9__GFX_SQ_VGPR, + RAS_GFX_V9__GFX_SQ_INDEX_END = RAS_GFX_V9__GFX_SQ_VGPR, + /* SQC (3 ranges) */ + RAS_GFX_V9__GFX_SQC_INDEX_START, + /* SQC range 0 */ + RAS_GFX_V9__GFX_SQC_INDEX0_START = + RAS_GFX_V9__GFX_SQC_INDEX_START, + RAS_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO = + RAS_GFX_V9__GFX_SQC_INDEX0_START, + RAS_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO, + 
RAS_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO, + RAS_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + RAS_GFX_V9__GFX_SQC_INDEX0_END = + RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + /* SQC range 1 */ + RAS_GFX_V9__GFX_SQC_INDEX1_START, + RAS_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM = + RAS_GFX_V9__GFX_SQC_INDEX1_START, + RAS_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX1_END = + RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + /* SQC range 2 */ + RAS_GFX_V9__GFX_SQC_INDEX2_START, + RAS_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM = + RAS_GFX_V9__GFX_SQC_INDEX2_START, + RAS_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX2_END = + RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX_END = + RAS_GFX_V9__GFX_SQC_INDEX2_END, + /* TA */ + RAS_GFX_V9__GFX_TA_INDEX_START, + RAS_GFX_V9__GFX_TA_FS_DFIFO = + RAS_GFX_V9__GFX_TA_INDEX_START, + RAS_GFX_V9__GFX_TA_FS_AFIFO, + RAS_GFX_V9__GFX_TA_FL_LFIFO, + RAS_GFX_V9__GFX_TA_FX_LFIFO, + RAS_GFX_V9__GFX_TA_FS_CFIFO, + RAS_GFX_V9__GFX_TA_INDEX_END = RAS_GFX_V9__GFX_TA_FS_CFIFO, + /* TCA */ + RAS_GFX_V9__GFX_TCA_INDEX_START, + RAS_GFX_V9__GFX_TCA_HOLE_FIFO = + RAS_GFX_V9__GFX_TCA_INDEX_START, + RAS_GFX_V9__GFX_TCA_REQ_FIFO, + RAS_GFX_V9__GFX_TCA_INDEX_END = + RAS_GFX_V9__GFX_TCA_REQ_FIFO, + /* TCC (5 sub-ranges) */ + RAS_GFX_V9__GFX_TCC_INDEX_START, + /* TCC range 0 */ + RAS_GFX_V9__GFX_TCC_INDEX0_START = + RAS_GFX_V9__GFX_TCC_INDEX_START, + RAS_GFX_V9__GFX_TCC_CACHE_DATA = + RAS_GFX_V9__GFX_TCC_INDEX0_START, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1, + RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0, + RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1, + RAS_GFX_V9__GFX_TCC_HIGH_RATE_TAG, + RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG, + RAS_GFX_V9__GFX_TCC_INDEX0_END = + RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG, + /* TCC range 1 */ + RAS_GFX_V9__GFX_TCC_INDEX1_START, + RAS_GFX_V9__GFX_TCC_IN_USE_DEC = + RAS_GFX_V9__GFX_TCC_INDEX1_START, + RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + RAS_GFX_V9__GFX_TCC_INDEX1_END = + RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + /* TCC range 2 */ + RAS_GFX_V9__GFX_TCC_INDEX2_START, + RAS_GFX_V9__GFX_TCC_RETURN_DATA = + RAS_GFX_V9__GFX_TCC_INDEX2_START, + RAS_GFX_V9__GFX_TCC_RETURN_CONTROL, + RAS_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO, + RAS_GFX_V9__GFX_TCC_WRITE_RETURN, + RAS_GFX_V9__GFX_TCC_WRITE_CACHE_READ, + RAS_GFX_V9__GFX_TCC_SRC_FIFO, + RAS_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM, + RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + RAS_GFX_V9__GFX_TCC_INDEX2_END = + RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + /* TCC range 3 */ + RAS_GFX_V9__GFX_TCC_INDEX3_START, + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO = + RAS_GFX_V9__GFX_TCC_INDEX3_START, + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + RAS_GFX_V9__GFX_TCC_INDEX3_END = + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + /* TCC range 4 */ + 
RAS_GFX_V9__GFX_TCC_INDEX4_START, + RAS_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN = + RAS_GFX_V9__GFX_TCC_INDEX4_START, + RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + RAS_GFX_V9__GFX_TCC_INDEX4_END = + RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + RAS_GFX_V9__GFX_TCC_INDEX_END = + RAS_GFX_V9__GFX_TCC_INDEX4_END, + /* TCI */ + RAS_GFX_V9__GFX_TCI_WRITE_RAM, + /* TCP */ + RAS_GFX_V9__GFX_TCP_INDEX_START, + RAS_GFX_V9__GFX_TCP_CACHE_RAM = + RAS_GFX_V9__GFX_TCP_INDEX_START, + RAS_GFX_V9__GFX_TCP_LFIFO_RAM, + RAS_GFX_V9__GFX_TCP_CMD_FIFO, + RAS_GFX_V9__GFX_TCP_VM_FIFO, + RAS_GFX_V9__GFX_TCP_DB_RAM, + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO0, + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + RAS_GFX_V9__GFX_TCP_INDEX_END = + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + /* TD */ + RAS_GFX_V9__GFX_TD_INDEX_START, + RAS_GFX_V9__GFX_TD_SS_FIFO_LO = + RAS_GFX_V9__GFX_TD_INDEX_START, + RAS_GFX_V9__GFX_TD_SS_FIFO_HI, + RAS_GFX_V9__GFX_TD_CS_FIFO, + RAS_GFX_V9__GFX_TD_INDEX_END = RAS_GFX_V9__GFX_TD_CS_FIFO, + /* EA (3 sub-ranges) */ + RAS_GFX_V9__GFX_EA_INDEX_START, + /* EA range 0 */ + RAS_GFX_V9__GFX_EA_INDEX0_START = + RAS_GFX_V9__GFX_EA_INDEX_START, + RAS_GFX_V9__GFX_EA_DRAMRD_CMDMEM = + RAS_GFX_V9__GFX_EA_INDEX0_START, + RAS_GFX_V9__GFX_EA_DRAMWR_CMDMEM, + RAS_GFX_V9__GFX_EA_DRAMWR_DATAMEM, + RAS_GFX_V9__GFX_EA_RRET_TAGMEM, + RAS_GFX_V9__GFX_EA_WRET_TAGMEM, + RAS_GFX_V9__GFX_EA_GMIRD_CMDMEM, + RAS_GFX_V9__GFX_EA_GMIWR_CMDMEM, + RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM, + RAS_GFX_V9__GFX_EA_INDEX0_END = + RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM, + /* EA range 1 */ + RAS_GFX_V9__GFX_EA_INDEX1_START, + RAS_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = + RAS_GFX_V9__GFX_EA_INDEX1_START, + RAS_GFX_V9__GFX_EA_DRAMWR_PAGEMEM, + RAS_GFX_V9__GFX_EA_IORD_CMDMEM, + RAS_GFX_V9__GFX_EA_IOWR_CMDMEM, + RAS_GFX_V9__GFX_EA_IOWR_DATAMEM, + RAS_GFX_V9__GFX_EA_GMIRD_PAGEMEM, + RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + RAS_GFX_V9__GFX_EA_INDEX1_END = + RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + /* EA range 2 */ + RAS_GFX_V9__GFX_EA_INDEX2_START, + RAS_GFX_V9__GFX_EA_MAM_D0MEM = + RAS_GFX_V9__GFX_EA_INDEX2_START, + RAS_GFX_V9__GFX_EA_MAM_D1MEM, + RAS_GFX_V9__GFX_EA_MAM_D2MEM, + RAS_GFX_V9__GFX_EA_MAM_D3MEM, + RAS_GFX_V9__GFX_EA_INDEX2_END = + RAS_GFX_V9__GFX_EA_MAM_D3MEM, + RAS_GFX_V9__GFX_EA_INDEX_END = + RAS_GFX_V9__GFX_EA_INDEX2_END, + /* UTC VM L2 bank */ + RAS_GFX_V9__UTC_VML2_BANK_CACHE, + /* UTC VM walker */ + RAS_GFX_V9__UTC_VML2_WALKER, + /* UTC ATC L2 2MB cache */ + RAS_GFX_V9__UTC_ATCL2_CACHE_2M_BANK, + /* UTC ATC L2 4KB cache */ + RAS_GFX_V9__UTC_ATCL2_CACHE_4K_BANK, + RAS_GFX_V9__GFX_MAX +}; + +extern const struct ras_gfx_ip_func gfx_ras_func_v9_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c new file mode 100644 index 000000000000..0a838fdcb2f6 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+
+#define RAS_LOG_MAX_QUERY_SIZE 0xC000
+#define RAS_LOG_MEM_TEMP_SIZE 0x200
+#define RAS_LOG_MEMPOOL_SIZE \
+	(RAS_LOG_MAX_QUERY_SIZE + RAS_LOG_MEM_TEMP_SIZE)
+
+#define BATCH_IDX_TO_TREE_IDX(batch_idx, sn) (((batch_idx) << 8) | (sn))
+
+static const uint64_t ras_rma_aca_reg[ACA_REG_MAX_COUNT] = {
+	[ACA_REG_IDX__CTL] = 0x1,
+	[ACA_REG_IDX__STATUS] = 0xB000000000000137,
+	[ACA_REG_IDX__ADDR] = 0x0,
+	[ACA_REG_IDX__MISC0] = 0x0,
+	[ACA_REG_IDX__CONFG] = 0x1ff00000002,
+	[ACA_REG_IDX__IPID] = 0x9600000000,
+	[ACA_REG_IDX__SYND] = 0x0,
+};
+
+static uint64_t ras_log_ring_get_logged_ecc_count(struct ras_core_context *ras_core)
+{
+	struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+	uint64_t count = 0;
+
+	if (log_ring->logged_ecc_count < 0) {
+		RAS_DEV_WARN(ras_core->dev,
+			"Error: the logged ras count should not be less than 0!\n");
+		count = 0;
+	} else {
+		count = log_ring->logged_ecc_count;
+	}
+
+	if (count > RAS_LOG_MEMPOOL_SIZE)
+		RAS_DEV_WARN(ras_core->dev,
+			"Error: the logged ras count is out of range!\n");
+
+	return count;
+}
+
+static int ras_log_ring_add_data(struct ras_core_context *ras_core,
+		struct ras_log_info *log, struct ras_log_batch_tag *batch_tag)
+{
+	struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+	unsigned long flags = 0;
+	int ret = 0;
+
+	if (batch_tag && (batch_tag->sub_seqno >= MAX_RECORD_PER_BATCH)) {
+		RAS_DEV_ERR(ras_core->dev,
+			"Invalid batch sub seqno:%d, batch:0x%llx\n",
+			batch_tag->sub_seqno, batch_tag->batch_id);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&log_ring->spin_lock, flags);
+	if (batch_tag) {
+		log->seqno =
+			BATCH_IDX_TO_TREE_IDX(batch_tag->batch_id, batch_tag->sub_seqno);
+		batch_tag->sub_seqno++;
+	} else {
+		log->seqno = BATCH_IDX_TO_TREE_IDX(log_ring->mono_upward_batch_id, 0);
+		log_ring->mono_upward_batch_id++;
+	}
+	ret = radix_tree_insert(&log_ring->ras_log_root, log->seqno, log);
+	if (!ret)
+		log_ring->logged_ecc_count++;
+	spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+	if (ret) {
+		RAS_DEV_ERR(ras_core->dev,
+			"Failed to add ras log! 
seqno:0x%llx, ret:%d\n", + log->seqno, ret); + mempool_free(log, log_ring->ras_log_mempool); + } + + return ret; +} + +static int ras_log_ring_delete_data(struct ras_core_context *ras_core, uint32_t count) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + unsigned long flags = 0; + uint32_t i = 0, j = 0; + uint64_t batch_id, idx; + void *data; + int ret = -ENODATA; + + if (count > ras_log_ring_get_logged_ecc_count(ras_core)) + return -EINVAL; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_id = log_ring->last_del_batch_id; + while (batch_id < log_ring->mono_upward_batch_id) { + for (j = 0; j < MAX_RECORD_PER_BATCH; j++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, j); + data = radix_tree_delete(&log_ring->ras_log_root, idx); + if (data) { + mempool_free(data, log_ring->ras_log_mempool); + log_ring->logged_ecc_count--; + i++; + } + } + batch_id = ++log_ring->last_del_batch_id; + if (i >= count) { + ret = 0; + break; + } + } + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + return ret; +} + +static void ras_log_ring_clear_log_tree(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + uint64_t batch_id, idx; + unsigned long flags = 0; + void *data; + int j; + + if ((log_ring->mono_upward_batch_id <= log_ring->last_del_batch_id) && + !log_ring->logged_ecc_count) + return; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_id = log_ring->last_del_batch_id; + while (batch_id < log_ring->mono_upward_batch_id) { + for (j = 0; j < MAX_RECORD_PER_BATCH; j++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, j); + data = radix_tree_delete(&log_ring->ras_log_root, idx); + if (data) { + mempool_free(data, log_ring->ras_log_mempool); + log_ring->logged_ecc_count--; + } + } + batch_id++; + } + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + +} + +int ras_log_ring_sw_init(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + memset(log_ring, 0, sizeof(*log_ring)); + + log_ring->ras_log_mempool = mempool_create_kmalloc_pool( + RAS_LOG_MEMPOOL_SIZE, sizeof(struct ras_log_info)); + if (!log_ring->ras_log_mempool) + return -ENOMEM; + + INIT_RADIX_TREE(&log_ring->ras_log_root, GFP_KERNEL); + + spin_lock_init(&log_ring->spin_lock); + + return 0; +} + +int ras_log_ring_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + ras_log_ring_clear_log_tree(ras_core); + log_ring->logged_ecc_count = 0; + log_ring->last_del_batch_id = 0; + log_ring->mono_upward_batch_id = 0; + + mempool_destroy(log_ring->ras_log_mempool); + + return 0; +} + +struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + struct ras_log_batch_tag *batch_tag; + unsigned long flags = 0; + + batch_tag = kzalloc(sizeof(*batch_tag), GFP_KERNEL); + if (!batch_tag) + return NULL; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_tag->batch_id = log_ring->mono_upward_batch_id; + log_ring->mono_upward_batch_id++; + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + batch_tag->sub_seqno = 0; + batch_tag->timestamp = ras_core_get_utc_second_timestamp(ras_core); + return batch_tag; +} + +void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core, + struct ras_log_batch_tag *batch_tag) +{ + kfree(batch_tag); +} + +void ras_log_ring_add_log_event(struct ras_core_context *ras_core, + enum ras_log_event event, void *data, struct ras_log_batch_tag 
*batch_tag) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + struct device_system_info dev_info = {0}; + struct ras_log_info *log; + uint64_t socket_id; + void *obj; + + obj = mempool_alloc_preallocated(log_ring->ras_log_mempool); + if (!obj || + (ras_log_ring_get_logged_ecc_count(ras_core) >= RAS_LOG_MEMPOOL_SIZE)) { + ras_log_ring_delete_data(ras_core, RAS_LOG_MEM_TEMP_SIZE); + if (!obj) + obj = mempool_alloc_preallocated(log_ring->ras_log_mempool); + } + + if (!obj) { + RAS_DEV_ERR(ras_core->dev, "ERROR: Failed to alloc ras log buffer!\n"); + return; + } + + log = (struct ras_log_info *)obj; + + memset(log, 0, sizeof(*log)); + log->timestamp = + batch_tag ? batch_tag->timestamp : ras_core_get_utc_second_timestamp(ras_core); + log->event = event; + + if (data) + memcpy(&log->aca_reg, data, sizeof(log->aca_reg)); + + if (event == RAS_LOG_EVENT_RMA) { + memcpy(&log->aca_reg, ras_rma_aca_reg, sizeof(log->aca_reg)); + ras_core_get_device_system_info(ras_core, &dev_info); + socket_id = dev_info.socket_id; + log->aca_reg.regs[ACA_REG_IDX__IPID] |= ((socket_id / 4) & 0x01); + log->aca_reg.regs[ACA_REG_IDX__IPID] |= (((socket_id % 4) & 0x3) << 44); + } + + ras_log_ring_add_data(ras_core, log, batch_tag); +} + +static struct ras_log_info *ras_log_ring_lookup_data(struct ras_core_context *ras_core, + uint64_t idx) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + unsigned long flags = 0; + void *data; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + data = radix_tree_lookup(&log_ring->ras_log_root, idx); + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + return (struct ras_log_info *)data; +} + +int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_id, + struct ras_log_info **log_arr, uint32_t arr_num) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + uint32_t i, idx, count = 0; + void *data; + + if ((batch_id >= log_ring->mono_upward_batch_id) || + (batch_id < log_ring->last_del_batch_id)) + return -EINVAL; + + for (i = 0; i < MAX_RECORD_PER_BATCH; i++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, i); + data = ras_log_ring_lookup_data(ras_core, idx); + if (data) { + log_arr[count++] = data; + if (count >= arr_num) + break; + } + } + + return count; +} + +int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core, + struct ras_log_batch_overview *overview) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + overview->logged_batch_count = + log_ring->mono_upward_batch_id - log_ring->last_del_batch_id; + overview->last_batch_id = log_ring->mono_upward_batch_id; + overview->first_batch_id = log_ring->last_del_batch_id; + + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h new file mode 100644 index 000000000000..0ff6cc35678d --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_LOG_RING_H__ +#define __RAS_LOG_RING_H__ +#include "ras_aca.h" + +#define MAX_RECORD_PER_BATCH 32 + +#define RAS_LOG_SEQNO_TO_BATCH_IDX(seqno) ((seqno) >> 8) + +enum ras_log_event { + RAS_LOG_EVENT_NONE, + RAS_LOG_EVENT_UE, + RAS_LOG_EVENT_DE, + RAS_LOG_EVENT_CE, + RAS_LOG_EVENT_POISON_CREATION, + RAS_LOG_EVENT_POISON_CONSUMPTION, + RAS_LOG_EVENT_RMA, + RAS_LOG_EVENT_COUNT_MAX, +}; + +struct ras_aca_reg { + uint64_t regs[ACA_REG_MAX_COUNT]; +}; + +struct ras_log_info { + uint64_t seqno; + uint64_t timestamp; + enum ras_log_event event; + union { + struct ras_aca_reg aca_reg; + }; +}; + +struct ras_log_batch_tag { + uint64_t batch_id; + uint64_t timestamp; + uint32_t sub_seqno; +}; + +struct ras_log_ring { + void *ras_log_mempool; + struct radix_tree_root ras_log_root; + spinlock_t spin_lock; + uint64_t mono_upward_batch_id; + uint64_t last_del_batch_id; + int logged_ecc_count; +}; + +struct ras_log_batch_overview { + uint64_t first_batch_id; + uint64_t last_batch_id; + uint32_t logged_batch_count; +}; + +struct ras_core_context; + +int ras_log_ring_sw_init(struct ras_core_context *ras_core); +int ras_log_ring_sw_fini(struct ras_core_context *ras_core); + +struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core); +void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core, + struct ras_log_batch_tag *tag); +void ras_log_ring_add_log_event(struct ras_core_context *ras_core, + enum ras_log_event event, void *data, struct ras_log_batch_tag *tag); + +int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_idx, + struct ras_log_info **log_arr, uint32_t arr_num); + +int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core, + struct ras_log_batch_overview *overview); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c new file mode 100644 index 000000000000..f3321df85021 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_mp1.h" +#include "ras_mp1_v13_0.h" + +static const struct ras_mp1_ip_func *ras_mp1_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + return &mp1_ras_func_v13_0; + default: + RAS_DEV_ERR(ras_core->dev, + "MP1 ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_mp1_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + return mp1->ip_func->get_valid_bank_count(ras_core, type, count); +} + +int ras_mp1_dump_bank(struct ras_core_context *ras_core, + u32 type, u32 idx, u32 reg_idx, u64 *val) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + return mp1->ip_func->dump_valid_bank(ras_core, type, idx, reg_idx, val); +} + +int ras_mp1_hw_init(struct ras_core_context *ras_core) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + mp1->mp1_ip_version = ras_core->config->mp1_ip_version; + mp1->sys_func = ras_core->config->mp1_cfg.mp1_sys_fn; + if (!mp1->sys_func) { + RAS_DEV_ERR(ras_core->dev, "RAS mp1 sys function not configured!\n"); + return -EINVAL; + } + + mp1->ip_func = ras_mp1_get_ip_funcs(ras_core, mp1->mp1_ip_version); + + return mp1->ip_func ? RAS_CORE_OK : -EINVAL; +} + +int ras_mp1_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h new file mode 100644 index 000000000000..de1d08286f41 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_MP1_H__ +#define __RAS_MP1_H__ +#include "ras.h" + +enum ras_err_type; +struct ras_mp1_ip_func { + int (*get_valid_bank_count)(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count); + int (*dump_valid_bank)(struct ras_core_context *ras_core, + enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val); +}; + +struct ras_mp1 { + uint32_t mp1_ip_version; + const struct ras_mp1_ip_func *ip_func; + const struct ras_mp1_sys_func *sys_func; +}; + +int ras_mp1_hw_init(struct ras_core_context *ras_core); +int ras_mp1_hw_fini(struct ras_core_context *ras_core); + +int ras_mp1_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count); + +int ras_mp1_dump_bank(struct ras_core_context *ras_core, + u32 ecc_type, u32 idx, u32 reg_idx, u64 *val); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c new file mode 100644 index 000000000000..310d39fc816b --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_mp1.h" +#include "ras_core_status.h" +#include "ras_mp1_v13_0.h" + +#define RAS_MP1_MSG_QueryValidMcaCount 0x36 +#define RAS_MP1_MSG_McaBankDumpDW 0x37 +#define RAS_MP1_MSG_ClearMcaOnRead 0x39 +#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A +#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B + +#define MAX_UE_BANKS_PER_QUERY 12 +#define MAX_CE_BANKS_PER_QUERY 12 + +static int mp1_v13_0_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + const struct ras_mp1_sys_func *sys_func = mp1->sys_func; + uint32_t bank_count = 0; + u32 msg; + int ret; + + if (!count) + return -EINVAL; + + if (!sys_func || !sys_func->mp1_get_valid_bank_count) + return -RAS_CORE_NOT_SUPPORTED; + + switch (type) { + case RAS_ERR_TYPE__UE: + msg = RAS_MP1_MSG_QueryValidMcaCount; + break; + case RAS_ERR_TYPE__CE: + case RAS_ERR_TYPE__DE: + msg = RAS_MP1_MSG_QueryValidMcaCeCount; + break; + default: + return -EINVAL; + } + + ret = sys_func->mp1_get_valid_bank_count(ras_core, msg, &bank_count); + if (!ret) { + if (((type == RAS_ERR_TYPE__UE) && (bank_count >= MAX_UE_BANKS_PER_QUERY)) || + ((type == RAS_ERR_TYPE__CE) && (bank_count >= MAX_CE_BANKS_PER_QUERY))) + return -EINVAL; + + *count = bank_count; + } + + return ret; +} + +static int mp1_v13_0_dump_bank(struct ras_core_context *ras_core, + enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + const struct ras_mp1_sys_func *sys_func = mp1->sys_func; + u32 msg; + + if (!sys_func || !sys_func->mp1_dump_valid_bank) + return -RAS_CORE_NOT_SUPPORTED; + + switch (type) { + case RAS_ERR_TYPE__UE: + msg = RAS_MP1_MSG_McaBankDumpDW; + break; + case RAS_ERR_TYPE__CE: + case RAS_ERR_TYPE__DE: + msg = RAS_MP1_MSG_McaBankCeDumpDW; + break; + default: + return -EINVAL; + } + + return sys_func->mp1_dump_valid_bank(ras_core, msg, idx, reg_idx, val); +} + +const struct ras_mp1_ip_func mp1_ras_func_v13_0 = { + .get_valid_bank_count = mp1_v13_0_get_bank_count, + .dump_valid_bank = mp1_v13_0_dump_bank, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h new file mode 100644 index 000000000000..2edfdb5f6a75 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_MP1_V13_0_H__ +#define __RAS_MP1_V13_0_H__ +#include "ras_mp1.h" + +extern const struct ras_mp1_ip_func mp1_ras_func_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c new file mode 100644 index 000000000000..bfddd104d548 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_nbio.h" +#include "ras_nbio_v7_9.h" + +static const struct ras_nbio_ip_func *ras_nbio_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(7, 9, 0): + case IP_VERSION(7, 9, 1): + return &ras_nbio_v7_9; + default: + RAS_DEV_ERR(ras_core->dev, + "NBIO ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_nbio_hw_init(struct ras_core_context *ras_core) +{ + struct ras_nbio *nbio = &ras_core->ras_nbio; + + nbio->nbio_ip_version = ras_core->config->nbio_ip_version; + nbio->sys_func = ras_core->config->nbio_cfg.nbio_sys_fn; + if (!nbio->sys_func) { + RAS_DEV_ERR(ras_core->dev, "RAS nbio sys function not configured!\n"); + return -EINVAL; + } + + nbio->ip_func = ras_nbio_get_ip_funcs(ras_core, nbio->nbio_ip_version); + if (!nbio->ip_func) + return -EINVAL; + + if (nbio->sys_func) { + if (nbio->sys_func->set_ras_controller_irq_state) + nbio->sys_func->set_ras_controller_irq_state(ras_core, true); + if (nbio->sys_func->set_ras_err_event_athub_irq_state) + nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, true); + } + + return 0; +} + +int ras_nbio_hw_fini(struct ras_core_context *ras_core) +{ + struct ras_nbio *nbio = &ras_core->ras_nbio; + + if (nbio->sys_func) { + if (nbio->sys_func->set_ras_controller_irq_state) + nbio->sys_func->set_ras_controller_irq_state(ras_core, false); + if (nbio->sys_func->set_ras_err_event_athub_irq_state) + nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, false); + } + + return 0; +} + +bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data) +{ + struct ras_nbio *nbio = &ras_core->ras_nbio; + + if (nbio->ip_func) { + if (nbio->ip_func->handle_ras_controller_intr_no_bifring) + nbio->ip_func->handle_ras_controller_intr_no_bifring(ras_core); + if (nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring) + 
nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring(ras_core); + } + + return true; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h new file mode 100644 index 000000000000..0a1313e59a02 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_NBIO_H__ +#define __RAS_NBIO_H__ +#include "ras.h" + +struct ras_core_context; + +struct ras_nbio_ip_func { + int (*handle_ras_controller_intr_no_bifring)(struct ras_core_context *ras_core); + int (*handle_ras_err_event_athub_intr_no_bifring)(struct ras_core_context *ras_core); + uint32_t (*get_memory_partition_mode)(struct ras_core_context *ras_core); +}; + +struct ras_nbio { + uint32_t nbio_ip_version; + const struct ras_nbio_ip_func *ip_func; + const struct ras_nbio_sys_func *sys_func; +}; + +int ras_nbio_hw_init(struct ras_core_context *ras_core); +int ras_nbio_hw_fini(struct ras_core_context *ras_core); +bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c new file mode 100644 index 000000000000..f17d708ec668 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_nbio_v7_9.h" + +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L + +#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2 +#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe + +#define regBIF_BX0_BIF_INTR_CNTL 0x0101 +#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2 + +/* BIF_BX0_BIF_INTR_CNTL */ +#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0 +#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L + +#define regBIF_BX_PF0_PARTITION_MEM_STATUS 0x0164 +#define regBIF_BX_PF0_PARTITION_MEM_STATUS_BASE_IDX 2 +/* BIF_BX_PF0_PARTITION_MEM_STATUS */ +#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0 +#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4 +#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL +#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L + + +static int nbio_v7_9_handle_ras_controller_intr_no_bifring(struct ras_core_context *ras_core) +{ + uint32_t bif_doorbell_intr_cntl = 0; + + bif_doorbell_intr_cntl = + RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); + + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, + RAS_CNTLR_INTERRUPT_CLEAR, 1); + + RAS_DEV_WREG32_SOC15(ras_core->dev, + NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + /* TODO: handle ras controller interrupt */ + } + + return 0; +} + +static int nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct ras_core_context *ras_core) +{ + uint32_t bif_doorbell_intr_cntl = 0; + int ret = 0; + + bif_doorbell_intr_cntl = + RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); + + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1); + + RAS_DEV_WREG32_SOC15(ras_core->dev, + NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + ret = ras_core_handle_fatal_error(ras_core); + } + + return ret; +} + +static uint32_t nbio_v7_9_get_memory_partition_mode(struct ras_core_context *ras_core) +{ + uint32_t mem_status; + uint32_t mem_mode; + + mem_status = + RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, 
regBIF_BX_PF0_PARTITION_MEM_STATUS); + + /* Each bit represents a mode 1-8*/ + mem_mode = REG_GET_FIELD(mem_status, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE); + + return ffs(mem_mode); +} + +const struct ras_nbio_ip_func ras_nbio_v7_9 = { + .handle_ras_controller_intr_no_bifring = + nbio_v7_9_handle_ras_controller_intr_no_bifring, + .handle_ras_err_event_athub_intr_no_bifring = + nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring, + .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h new file mode 100644 index 000000000000..8711c82a927f --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_NBIO_V7_9_H__ +#define __RAS_NBIO_V7_9_H__ +#include "ras_nbio.h" + +extern const struct ras_nbio_ip_func ras_nbio_v7_9; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.c b/drivers/gpu/drm/amd/ras/rascore/ras_process.c new file mode 100644 index 000000000000..3267dcdb169c --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_process.h" + +#define RAS_EVENT_FIFO_SIZE (128 * sizeof(struct ras_event_req)) + +#define RAS_POLLING_ECC_TIMEOUT 300 + +static int ras_process_put_event(struct ras_core_context *ras_core, + struct ras_event_req *req) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + int ret; + + ret = kfifo_in_spinlocked(&ras_proc->event_fifo, + req, sizeof(*req), &ras_proc->fifo_spinlock); + if (!ret) { + RAS_DEV_ERR(ras_core->dev, "Poison message fifo is full!\n"); + return -ENOSPC; + } + + return 0; +} + +static int ras_process_add_reset_gpu_event(struct ras_core_context *ras_core, + uint32_t reset_cause) +{ + struct ras_event_req req = {0}; + + req.reset = reset_cause; + + return ras_process_put_event(ras_core, &req); +} + +static int ras_process_get_event(struct ras_core_context *ras_core, + struct ras_event_req *req) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + + return kfifo_out_spinlocked(&ras_proc->event_fifo, + req, sizeof(*req), &ras_proc->fifo_spinlock); +} + +static void ras_process_clear_event_fifo(struct ras_core_context *ras_core) +{ + struct ras_event_req req; + int ret; + + do { + ret = ras_process_get_event(ras_core, &req); + } while (ret); +} + +#define AMDGPU_RAS_WAITING_DATA_READY 200 +static int ras_process_umc_event(struct ras_core_context *ras_core, + uint32_t event_count) +{ + struct ras_ecc_count ecc_data; + int ret = 0; + uint32_t timeout = 0; + uint32_t detected_de_count = 0; + + do { + memset(&ecc_data, 0, sizeof(ecc_data)); + ret = ras_core_update_ecc_info(ras_core); + if (ret) + return ret; + + ret = ras_core_query_block_ecc_data(ras_core, RAS_BLOCK_ID__UMC, &ecc_data); + if (ret) + return ret; + + if (ecc_data.new_de_count) { + detected_de_count += ecc_data.new_de_count; + timeout = 0; + } else { + if (!timeout && event_count) + timeout = AMDGPU_RAS_WAITING_DATA_READY; + + if (timeout) { + if (!--timeout) + break; + + msleep(1); + } + } + } while (detected_de_count < event_count); + + if (detected_de_count && ras_core_gpu_is_rma(ras_core)) + ras_process_add_reset_gpu_event(ras_core, GPU_RESET_CAUSE_RMA); + + return 0; +} + +static int ras_process_non_umc_event(struct ras_core_context *ras_core) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + struct ras_event_req req; + uint32_t event_count = kfifo_len(&ras_proc->event_fifo); + uint32_t reset_flags = 0; + int ret = 0, i; + + for (i = 0; i < event_count; i++) { + memset(&req, 0, sizeof(req)); + ret = ras_process_get_event(ras_core, &req); + if (!ret) + continue; + + ras_core_event_notify(ras_core, + RAS_EVENT_ID__POISON_CONSUMPTION, &req); + + reset_flags |= req.reset; + + if (req.reset == GPU_RESET_CAUSE_RMA) + continue; + + if (req.reset) + RAS_DEV_INFO(ras_core->dev, + "{%llu} GPU reset for %s RAS poison consumption is issued!\n", + req.seqno, ras_core_get_ras_block_name(req.block)); + else + RAS_DEV_INFO(ras_core->dev, + "{%llu} %s RAS poison consumption is issued!\n", + req.seqno, ras_core_get_ras_block_name(req.block)); + } + + if (reset_flags) { + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__RESET_GPU, &reset_flags); + if (!ret && (reset_flags & GPU_RESET_CAUSE_RMA)) + return -RAS_CORE_GPU_IN_MODE1_RESET; + } + + return ret; +} + +int ras_process_handle_ras_event(struct ras_core_context *ras_core) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + uint32_t umc_event_count; + int ret; + + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN, NULL); + if (ret) + return ret; + + 
ras_aca_clear_fatal_flag(ras_core);
+	ras_umc_log_pending_bad_bank(ras_core);
+
+	do {
+		umc_event_count = atomic_read(&ras_proc->umc_interrupt_count);
+		ret = ras_process_umc_event(ras_core, umc_event_count);
+		if (ret == -RAS_CORE_GPU_IN_MODE1_RESET)
+			break;
+
+		if (umc_event_count)
+			atomic_sub(umc_event_count, &ras_proc->umc_interrupt_count);
+	} while (atomic_read(&ras_proc->umc_interrupt_count));
+
+	if ((ret != -RAS_CORE_GPU_IN_MODE1_RESET) &&
+	    (kfifo_len(&ras_proc->event_fifo)))
+		ret = ras_process_non_umc_event(ras_core);
+
+	if (ret == -RAS_CORE_GPU_IN_MODE1_RESET) {
+		/* Clear poison fifo */
+		ras_process_clear_event_fifo(ras_core);
+		atomic_set(&ras_proc->umc_interrupt_count, 0);
+	}
+
+	ras_core_event_notify(ras_core,
+			RAS_EVENT_ID__RAS_EVENT_PROC_END, NULL);
+	return ret;
+}
+
+static int thread_wait_condition(void *param)
+{
+	struct ras_process *ras_proc = (struct ras_process *)param;
+
+	return (kthread_should_stop() ||
+		atomic_read(&ras_proc->ras_interrupt_req));
+}
+
+static int ras_process_thread(void *context)
+{
+	struct ras_core_context *ras_core = (struct ras_core_context *)context;
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+
+	while (!kthread_should_stop()) {
+		ras_wait_event_interruptible_timeout(&ras_proc->ras_process_wq,
+			thread_wait_condition, ras_proc,
+			msecs_to_jiffies(RAS_POLLING_ECC_TIMEOUT));
+
+		if (kthread_should_stop())
+			break;
+
+		if (!ras_core->is_initialized)
+			continue;
+
+		atomic_set(&ras_proc->ras_interrupt_req, 0);
+
+		if (ras_core_gpu_in_reset(ras_core))
+			continue;
+
+		if (ras_core->sys_fn && ras_core->sys_fn->async_handle_ras_event)
+			ras_core->sys_fn->async_handle_ras_event(ras_core, NULL);
+		else
+			ras_process_handle_ras_event(ras_core);
+	}
+
+	return 0;
+}
+
+int ras_process_init(struct ras_core_context *ras_core)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+	int ret;
+
+	ret = kfifo_alloc(&ras_proc->event_fifo, RAS_EVENT_FIFO_SIZE, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&ras_proc->fifo_spinlock);
+
+	init_waitqueue_head(&ras_proc->ras_process_wq);
+
+	ras_proc->ras_process_thread = kthread_run(ras_process_thread,
+			(void *)ras_core, "ras_process_thread");
+	/* kthread_run() returns an ERR_PTR on failure, never NULL */
+	if (IS_ERR(ras_proc->ras_process_thread)) {
+		RAS_DEV_ERR(ras_core->dev, "Failed to create ras_process_thread.\n");
+		ret = PTR_ERR(ras_proc->ras_process_thread);
+		ras_proc->ras_process_thread = NULL;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	ras_process_fini(ras_core);
+	return ret;
+}
+
+int ras_process_fini(struct ras_core_context *ras_core)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+
+	if (ras_proc->ras_process_thread) {
+		kthread_stop(ras_proc->ras_process_thread);
+		ras_proc->ras_process_thread = NULL;
+	}
+
+	kfifo_free(&ras_proc->event_fifo);
+
+	return 0;
+}
+
+static int ras_process_add_umc_interrupt_req(struct ras_core_context *ras_core,
+		struct ras_event_req *req)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+
+	atomic_inc(&ras_proc->umc_interrupt_count);
+	atomic_inc(&ras_proc->ras_interrupt_req);
+
+	wake_up(&ras_proc->ras_process_wq);
+	return 0;
+}
+
+static int ras_process_add_non_umc_interrupt_req(struct ras_core_context *ras_core,
+		struct ras_event_req *req)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+	int ret;
+
+	ret = ras_process_put_event(ras_core, req);
+	if (!ret) {
+		atomic_inc(&ras_proc->ras_interrupt_req);
+		wake_up(&ras_proc->ras_process_wq);
+	}
+
+	return ret;
+}
+
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+		struct ras_event_req *req, bool is_umc)
+{
+	int ret;
+
+	if (!ras_core)
+		return -EINVAL;
+
+	if 
(!ras_core->is_initialized) + return -EPERM; + + if (is_umc) + ret = ras_process_add_umc_interrupt_req(ras_core, req); + else + ret = ras_process_add_non_umc_interrupt_req(ras_core, req); + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.h b/drivers/gpu/drm/amd/ras/rascore/ras_process.h new file mode 100644 index 000000000000..28458b50510e --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_PROCESS_H__ +#define __RAS_PROCESS_H__ + +struct ras_event_req { + uint64_t seqno; + uint32_t idx_vf; + uint32_t block; + uint16_t pasid; + uint32_t reset; + void *pasid_fn; + void *data; +}; + +struct ras_process { + void *dev; + void *ras_process_thread; + wait_queue_head_t ras_process_wq; + atomic_t ras_interrupt_req; + atomic_t umc_interrupt_count; + struct kfifo event_fifo; + spinlock_t fifo_spinlock; +}; + +struct ras_core_context; +int ras_process_init(struct ras_core_context *ras_core); +int ras_process_fini(struct ras_core_context *ras_core); +int ras_process_handle_ras_event(struct ras_core_context *ras_core); +int ras_process_add_interrupt_req(struct ras_core_context *ras_core, + struct ras_event_req *req, bool is_umc); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c new file mode 100644 index 000000000000..ccdb42d2dd60 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c @@ -0,0 +1,750 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_ta_if.h" +#include "ras_psp.h" +#include "ras_psp_v13_0.h" + +/* position of instance value in sub_block_index of + * ta_ras_trigger_error_input, the sub block uses lower 12 bits + */ +#define RAS_TA_INST_MASK 0xfffff000 +#define RAS_TA_INST_SHIFT 0xc + +static const struct ras_psp_ip_func *ras_psp_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + return &ras_psp_v13_0; + default: + RAS_DEV_ERR(ras_core->dev, + "psp ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +static int ras_psp_sync_system_ras_psp_status(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + struct ras_psp_sys_status status = {0}; + int ret; + + if (psp->sys_func && psp->sys_func->get_ras_psp_system_status) { + ret = psp->sys_func->get_ras_psp_system_status(ras_core, &status); + if (ret) + return ret; + + if (status.initialized) { + ta_ctx->preload_ras_ta_enabled = true; + ta_ctx->ras_ta_initialized = status.initialized; + ta_ctx->session_id = status.session_id; + } + + psp_ctx->external_mutex = status.psp_cmd_mutex; + } + + return 0; +} + +static int ras_psp_get_ras_ta_init_param(struct ras_core_context *ras_core, + struct ras_ta_init_param *ras_ta_param) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + if (psp->sys_func && psp->sys_func->get_ras_ta_init_param) + return psp->sys_func->get_ras_ta_init_param(ras_core, ras_ta_param); + + RAS_DEV_ERR(ras_core->dev, "Not config get_ras_ta_init_param API!!\n"); + return -EACCES; +} + +static struct gpu_mem_block *ras_psp_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type) +{ + struct ras_psp *psp = &ras_core->ras_psp; + struct gpu_mem_block *gpu_mem = NULL; + int ret; + + switch (mem_type) { + case GPU_MEM_TYPE_RAS_PSP_RING: + gpu_mem = &psp->psp_ring.ras_ring_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_PSP_CMD: + gpu_mem = &psp->psp_ctx.psp_cmd_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_PSP_FENCE: + gpu_mem = &psp->psp_ctx.out_fence_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_TA_FW: + gpu_mem = &psp->ta_ctx.fw_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_TA_CMD: + gpu_mem = &psp->ta_ctx.cmd_gpu_mem; + break; + default: + return NULL; + } + + if (!gpu_mem->ref_count) { + ret = ras_core_get_gpu_mem(ras_core, mem_type, gpu_mem); + if (ret) + return NULL; + gpu_mem->mem_type = mem_type; + } + + gpu_mem->ref_count++; + + return gpu_mem; +} + +static int ras_psp_put_gpu_mem(struct ras_core_context *ras_core, + struct gpu_mem_block *gpu_mem) +{ + if (!gpu_mem) + return 0; + + gpu_mem->ref_count--; + + if (gpu_mem->ref_count > 0) { + return 0; + } else if (gpu_mem->ref_count < 0) { + RAS_DEV_WARN(ras_core->dev, + "Duplicate free gpu memory %u\n", gpu_mem->mem_type); + } else { + ras_core_put_gpu_mem(ras_core, gpu_mem->mem_type, gpu_mem); + memset(gpu_mem, 0, sizeof(*gpu_mem)); + } + + return 0; +} + +static void __acquire_psp_cmd_lock(struct ras_core_context *ras_core) +{ + struct ras_psp_ctx *psp_ctx = 
&ras_core->ras_psp.psp_ctx; + + if (psp_ctx->external_mutex) + mutex_lock(psp_ctx->external_mutex); + else + mutex_lock(&psp_ctx->internal_mutex); +} + +static void __release_psp_cmd_lock(struct ras_core_context *ras_core) +{ + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + + if (psp_ctx->external_mutex) + mutex_unlock(psp_ctx->external_mutex); + else + mutex_unlock(&psp_ctx->internal_mutex); +} + +static uint32_t __get_ring_frame_slot(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + uint32_t ras_ring_wptr_dw; + + ras_ring_wptr_dw = psp->ip_func->psp_ras_ring_wptr_get(ras_core); + + return div64_u64((ras_ring_wptr_dw << 2), sizeof(struct psp_gfx_rb_frame)); +} + +static int __set_ring_frame_slot(struct ras_core_context *ras_core, + uint32_t slot) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + return psp->ip_func->psp_ras_ring_wptr_set(ras_core, + (slot * sizeof(struct psp_gfx_rb_frame)) >> 2); +} + +static int write_frame_to_ras_psp_ring(struct ras_core_context *ras_core, + struct psp_gfx_rb_frame *frame) +{ + struct gpu_mem_block *ring_mem; + struct psp_gfx_rb_frame *rb_frame; + uint32_t max_frame_slot; + uint32_t slot_idx; + uint32_t write_flush_read_back = 0; + int ret = 0; + + ring_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_RING); + if (!ring_mem) + return -ENOMEM; + + max_frame_slot = + div64_u64(ring_mem->mem_size, sizeof(struct psp_gfx_rb_frame)); + + rb_frame = + (struct psp_gfx_rb_frame *)ring_mem->mem_cpu_addr; + + slot_idx = __get_ring_frame_slot(ras_core); + if (slot_idx >= max_frame_slot) + slot_idx = 0; + + memcpy(&rb_frame[slot_idx], frame, sizeof(*frame)); + + /* Do a read to force the write of the frame before writing + * write pointer. + */ + write_flush_read_back = rb_frame[slot_idx].fence_value; + if (write_flush_read_back != frame->fence_value) { + RAS_DEV_ERR(ras_core->dev, + "Failed to submit ring cmd! 
cmd:0x%x:0x%x, fence:0x%x:0x%x value:%u, expected:%u\n", + rb_frame[slot_idx].cmd_buf_addr_hi, + rb_frame[slot_idx].cmd_buf_addr_lo, + rb_frame[slot_idx].fence_addr_hi, + rb_frame[slot_idx].fence_addr_lo, + write_flush_read_back, frame->fence_value); + ret = -EACCES; + goto err; + } + + slot_idx++; + + if (slot_idx >= max_frame_slot) + slot_idx = 0; + + __set_ring_frame_slot(ras_core, slot_idx); + +err: + ras_psp_put_gpu_mem(ras_core, ring_mem); + return ret; +} + +static int send_psp_cmd(struct ras_core_context *ras_core, + enum psp_gfx_cmd_id gfx_cmd_id, void *cmd_data, + uint32_t cmd_size, struct psp_cmd_resp *resp) +{ + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + struct gpu_mem_block *psp_cmd_buf = NULL; + struct gpu_mem_block *psp_fence_buf = NULL; + struct psp_gfx_cmd_resp *gfx_cmd; + struct psp_gfx_rb_frame rb_frame; + int ret = 0; + int timeout = 1000; + + if (!cmd_data || (cmd_size > sizeof(union psp_gfx_commands)) || !resp) { + RAS_DEV_ERR(ras_core->dev, "Invalid RAS PSP command, id: %u\n", gfx_cmd_id); + return -EINVAL; + } + + __acquire_psp_cmd_lock(ras_core); + + psp_cmd_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_CMD); + if (!psp_cmd_buf) { + ret = -ENOMEM; + goto exit; + } + + psp_fence_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_FENCE); + if (!psp_fence_buf) { + ret = -ENOMEM; + goto exit; + } + + gfx_cmd = (struct psp_gfx_cmd_resp *)psp_cmd_buf->mem_cpu_addr; + memset(gfx_cmd, 0, sizeof(*gfx_cmd)); + gfx_cmd->cmd_id = gfx_cmd_id; + memcpy(&gfx_cmd->cmd, cmd_data, cmd_size); + + psp_ctx->in_fence_value++; + + memset(&rb_frame, 0, sizeof(rb_frame)); + rb_frame.cmd_buf_addr_hi = upper_32_bits(psp_cmd_buf->mem_mc_addr); + rb_frame.cmd_buf_addr_lo = lower_32_bits(psp_cmd_buf->mem_mc_addr); + rb_frame.fence_addr_hi = upper_32_bits(psp_fence_buf->mem_mc_addr); + rb_frame.fence_addr_lo = lower_32_bits(psp_fence_buf->mem_mc_addr); + rb_frame.fence_value = psp_ctx->in_fence_value; + + ret = write_frame_to_ras_psp_ring(ras_core, &rb_frame); + if (ret) { + psp_ctx->in_fence_value--; + goto exit; + } + + while (*((uint64_t *)psp_fence_buf->mem_cpu_addr) != + psp_ctx->in_fence_value) { + if (--timeout == 0) + break; + /* + * Shouldn't wait for timeout when err_event_athub occurs, + * because gpu reset thread triggered and lock resource should + * be released for psp resume sequence. 
+ */ + if (ras_core_ras_interrupt_detected(ras_core)) + break; + + msleep(2); + } + + resp->status = gfx_cmd->resp.status; + resp->session_id = gfx_cmd->resp.session_id; + +exit: + ras_psp_put_gpu_mem(ras_core, psp_cmd_buf); + ras_psp_put_gpu_mem(ras_core, psp_fence_buf); + + __release_psp_cmd_lock(ras_core); + + return ret; +} + +static void __check_ras_ta_cmd_resp(struct ras_core_context *ras_core, + struct ras_ta_cmd *ras_cmd) +{ + + if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { + RAS_DEV_WARN(ras_core->dev, "ECC switch disabled\n"); + ras_cmd->ras_status = RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE; + } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) + RAS_DEV_WARN(ras_core->dev, "RAS internal register access blocked\n"); + + switch (ras_cmd->ras_status) { + case RAS_TA_STATUS__ERROR_UNSUPPORTED_IP: + RAS_DEV_WARN(ras_core->dev, + "RAS WARNING: cmd failed due to unsupported ip\n"); + break; + case RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: + RAS_DEV_WARN(ras_core->dev, + "RAS WARNING: cmd failed due to unsupported error injection\n"); + break; + case RAS_TA_STATUS__SUCCESS: + break; + case RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED: + if (ras_cmd->cmd_id == RAS_TA_CMD_ID__TRIGGER_ERROR) + RAS_DEV_WARN(ras_core->dev, + "RAS WARNING: Inject error to critical region is not allowed\n"); + break; + default: + RAS_DEV_WARN(ras_core->dev, + "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); + break; + } +} + +static int send_ras_ta_runtime_cmd(struct ras_core_context *ras_core, + enum ras_ta_cmd_id cmd_id, void *in, uint32_t in_size, + void *out, uint32_t out_size) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct gpu_mem_block *cmd_mem; + struct ras_ta_cmd *ras_cmd; + struct psp_gfx_cmd_invoke_cmd invoke_cmd = {0}; + struct psp_cmd_resp resp = {0}; + int ret = 0; + + if (!in || (in_size > sizeof(union ras_ta_cmd_input)) || + (cmd_id >= MAX_RAS_TA_CMD_ID)) { + RAS_DEV_ERR(ras_core->dev, "Invalid RAS TA command, id: %u\n", cmd_id); + return -EINVAL; + } + + ras_psp_sync_system_ras_psp_status(ras_core); + + cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD); + if (!cmd_mem) + return -ENOMEM; + + if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) { + ret = -EACCES; + goto out; + } + + ras_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr; + + mutex_lock(&ta_ctx->ta_mutex); + + memset(ras_cmd, 0, sizeof(*ras_cmd)); + ras_cmd->cmd_id = cmd_id; + memcpy(&ras_cmd->ras_in_message, in, in_size); + + invoke_cmd.ta_cmd_id = cmd_id; + invoke_cmd.session_id = ta_ctx->session_id; + + ret = send_psp_cmd(ras_core, GFX_CMD_ID_INVOKE_CMD, + &invoke_cmd, sizeof(invoke_cmd), &resp); + + /* If err_event_athub occurs error inject was successful, however + * return status from TA is no long reliable + */ + if (ras_core_ras_interrupt_detected(ras_core)) { + ret = 0; + goto unlock; + } + + if (ret || resp.status) { + RAS_DEV_ERR(ras_core->dev, + "RAS: Failed to send psp cmd! 
ret:%d, status:%u\n", + ret, resp.status); + ret = -ESTRPIPE; + goto unlock; + } + + if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { + RAS_DEV_WARN(ras_core->dev, "RAS: Unsupported Interface\n"); + ret = -EINVAL; + goto unlock; + } + + if (!ras_cmd->ras_status && out && out_size) + memcpy(out, &ras_cmd->ras_out_message, out_size); + + __check_ras_ta_cmd_resp(ras_core, ras_cmd); + +unlock: + mutex_unlock(&ta_ctx->ta_mutex); + ras_core_up_gpu_reset_lock(ras_core); +out: + ras_psp_put_gpu_mem(ras_core, cmd_mem); + return ret; +} + +static int trigger_ras_ta_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask) +{ + uint32_t dev_mask = 0; + + switch (info->block_id) { + case RAS_TA_BLOCK__GFX: + if (ras_gfx_get_ta_subblock(ras_core, info->inject_error_type, + info->sub_block_index, &info->sub_block_index)) + return -EINVAL; + + dev_mask = RAS_GET_MASK(ras_core->dev, GC, instance_mask); + break; + case RAS_TA_BLOCK__SDMA: + dev_mask = RAS_GET_MASK(ras_core->dev, SDMA0, instance_mask); + break; + case RAS_TA_BLOCK__VCN: + case RAS_TA_BLOCK__JPEG: + dev_mask = RAS_GET_MASK(ras_core->dev, VCN, instance_mask); + break; + default: + dev_mask = instance_mask; + break; + } + + /* reuse sub_block_index for backward compatibility */ + dev_mask <<= RAS_TA_INST_SHIFT; + dev_mask &= RAS_TA_INST_MASK; + info->sub_block_index |= dev_mask; + + return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__TRIGGER_ERROR, + info, sizeof(*info), NULL, 0); +} + +static int send_load_ta_fw_cmd(struct ras_core_context *ras_core, + struct ras_ta_ctx *ta_ctx) +{ + struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin; + struct gpu_mem_block *fw_mem; + struct gpu_mem_block *cmd_mem; + struct ras_ta_cmd *ta_cmd; + struct ras_ta_init_flags *ta_init_flags; + struct psp_gfx_cmd_load_ta psp_load_ta_cmd; + struct psp_cmd_resp resp = {0}; + struct ras_ta_image_header *fw_hdr = NULL; + int ret; + + fw_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_FW); + if (!fw_mem) + return -ENOMEM; + + cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD); + if (!cmd_mem) { + ret = -ENOMEM; + goto err; + } + + ret = ras_psp_get_ras_ta_init_param(ras_core, &ta_ctx->init_param); + if (ret) + goto err; + + if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) { + ret = -EACCES; + goto err; + } + + /* copy ras ta binary to shared gpu memory */ + memcpy(fw_mem->mem_cpu_addr, fw_bin->bin_addr, fw_bin->bin_size); + fw_mem->mem_size = fw_bin->bin_size; + + /* Initialize ras ta startup parameter */ + ta_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr; + ta_init_flags = &ta_cmd->ras_in_message.init_flags; + + ta_init_flags->poison_mode_en = ta_ctx->init_param.poison_mode_en; + ta_init_flags->dgpu_mode = ta_ctx->init_param.dgpu_mode; + ta_init_flags->xcc_mask = ta_ctx->init_param.xcc_mask; + ta_init_flags->channel_dis_num = ta_ctx->init_param.channel_dis_num; + ta_init_flags->nps_mode = ta_ctx->init_param.nps_mode; + ta_init_flags->active_umc_mask = ta_ctx->init_param.active_umc_mask; + + /* Setup load ras ta command */ + memset(&psp_load_ta_cmd, 0, sizeof(psp_load_ta_cmd)); + psp_load_ta_cmd.app_phy_addr_lo = lower_32_bits(fw_mem->mem_mc_addr); + psp_load_ta_cmd.app_phy_addr_hi = upper_32_bits(fw_mem->mem_mc_addr); + psp_load_ta_cmd.app_len = fw_mem->mem_size; + psp_load_ta_cmd.cmd_buf_phy_addr_lo = lower_32_bits(cmd_mem->mem_mc_addr); + psp_load_ta_cmd.cmd_buf_phy_addr_hi = upper_32_bits(cmd_mem->mem_mc_addr); + psp_load_ta_cmd.cmd_buf_len = cmd_mem->mem_size; + + ret = 
send_psp_cmd(ras_core, GFX_CMD_ID_LOAD_TA, + &psp_load_ta_cmd, sizeof(psp_load_ta_cmd), &resp); + if (!ret && !resp.status) { + /* Read TA version at FW offset 0x60 if TA version not found*/ + fw_hdr = (struct ras_ta_image_header *)fw_bin->bin_addr; + RAS_DEV_INFO(ras_core->dev, "PSP: RAS TA(version:%X.%X.%X.%X) is loaded.\n", + (fw_hdr->image_version >> 24) & 0xFF, (fw_hdr->image_version >> 16) & 0xFF, + (fw_hdr->image_version >> 8) & 0xFF, fw_hdr->image_version & 0xFF); + ta_ctx->ta_version = fw_hdr->image_version; + ta_ctx->session_id = resp.session_id; + ta_ctx->ras_ta_initialized = true; + } else { + RAS_DEV_ERR(ras_core->dev, + "Failed to load RAS TA! ret:%d, status:%d\n", ret, resp.status); + } + + ras_core_up_gpu_reset_lock(ras_core); + +err: + ras_psp_put_gpu_mem(ras_core, fw_mem); + ras_psp_put_gpu_mem(ras_core, cmd_mem); + return ret; +} + +static int load_ras_ta_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin; + int ret; + + fw_bin->bin_addr = ras_ta_load->bin_addr; + fw_bin->bin_size = ras_ta_load->bin_size; + fw_bin->fw_version = ras_ta_load->fw_version; + fw_bin->feature_version = ras_ta_load->feature_version; + + ret = send_load_ta_fw_cmd(ras_core, ta_ctx); + if (!ret) { + ras_ta_load->out_session_id = ta_ctx->session_id; + ras_ta_load->out_loaded_ta_version = ta_ctx->ta_version; + } + + return ret; +} + +static int unload_ras_ta_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct psp_gfx_cmd_unload_ta cmd_unload_ta = {0}; + struct psp_cmd_resp resp = {0}; + int ret; + + if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) + return -EACCES; + + cmd_unload_ta.session_id = ta_ctx->session_id; + ret = send_psp_cmd(ras_core, GFX_CMD_ID_UNLOAD_TA, + &cmd_unload_ta, sizeof(cmd_unload_ta), &resp); + if (ret || resp.status) { + RAS_DEV_ERR(ras_core->dev, + "Failed to unload RAS TA! 
ret:%d, status:%u\n", + ret, resp.status); + goto unlock; + } + + kfree(ta_ctx->fw_bin.bin_addr); + memset(&ta_ctx->fw_bin, 0, sizeof(ta_ctx->fw_bin)); + ta_ctx->ta_version = 0; + ta_ctx->ras_ta_initialized = false; + ta_ctx->session_id = 0; + +unlock: + ras_core_up_gpu_reset_lock(ras_core); + + return ret; +} + +int ras_psp_load_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_psp_ta_unload ras_ta_unload = {0}; + int ret; + + if (ta_ctx->preload_ras_ta_enabled) + return 0; + + if (!ras_ta_load) + return -EINVAL; + + if (ta_ctx->ras_ta_initialized) { + ras_ta_unload.ras_session_id = ta_ctx->session_id; + ret = unload_ras_ta_firmware(ras_core, &ras_ta_unload); + if (ret) + return ret; + } + + return load_ras_ta_firmware(ras_core, ras_ta_load); +} + +int ras_psp_unload_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (ta_ctx->preload_ras_ta_enabled) + return 0; + + if ((!ras_ta_unload) || + (ras_ta_unload->ras_session_id != ta_ctx->session_id)) + return -EINVAL; + + return unload_ras_ta_firmware(ras_core, ras_ta_unload); +} + +int ras_psp_trigger_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) { + RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!"); + return -ENOEXEC; + } + + if (!info) + return -EINVAL; + + return trigger_ras_ta_error(ras_core, info, instance_mask); +} + +int ras_psp_query_address(struct ras_core_context *ras_core, + struct ras_ta_query_address_input *addr_in, + struct ras_ta_query_address_output *addr_out) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (!ta_ctx->preload_ras_ta_enabled && + !ta_ctx->ras_ta_initialized) { + RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!"); + return -ENOEXEC; + } + + if (!addr_in || !addr_out) + return -EINVAL; + + return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS, + addr_in, sizeof(*addr_in), addr_out, sizeof(*addr_out)); +} + +int ras_psp_sw_init(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + memset(psp, 0, sizeof(*psp)); + + psp->sys_func = ras_core->config->psp_cfg.psp_sys_fn; + if (!psp->sys_func) { + RAS_DEV_ERR(ras_core->dev, "RAS psp sys function not configured!\n"); + return -EINVAL; + } + + mutex_init(&psp->psp_ctx.internal_mutex); + mutex_init(&psp->ta_ctx.ta_mutex); + + return 0; +} + +int ras_psp_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + mutex_destroy(&psp->psp_ctx.internal_mutex); + mutex_destroy(&psp->ta_ctx.ta_mutex); + + memset(psp, 0, sizeof(*psp)); + + return 0; +} + +int ras_psp_hw_init(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + psp->psp_ip_version = ras_core->config->psp_ip_version; + + psp->ip_func = ras_psp_get_ip_funcs(ras_core, psp->psp_ip_version); + if (!psp->ip_func) + return -EINVAL; + + /* After GPU reset, the system RAS PSP status may change. + * therefore, it is necessary to synchronize the system status again. 
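+	 * The same synchronization is also performed at the start of
+	 * send_ras_ta_runtime_cmd() before each TA runtime command is issued.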
+ */ + ras_psp_sync_system_ras_psp_status(ras_core); + + return 0; +} + +int ras_psp_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} + +bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core, + enum ras_ta_cmd_id cmd_id) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + bool ret = false; + + if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) + return false; + + switch (cmd_id) { + case RAS_TA_CMD_ID__QUERY_ADDRESS: + /* Currently, querying the address from RAS TA is only supported + * when the RAS TA firmware is loaded during driver installation. + */ + if (ta_ctx->preload_ras_ta_enabled) + ret = true; + break; + case RAS_TA_CMD_ID__TRIGGER_ERROR: + ret = true; + break; + default: + ret = false; + break; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h new file mode 100644 index 000000000000..71776fecfd66 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_PSP_H__ +#define __RAS_PSP_H__ +#include "ras.h" +#include "ras_ta_if.h" + +struct ras_core_context; +struct ras_ta_trigger_error_input; +struct ras_ta_query_address_input; +struct ras_ta_query_address_output; +enum ras_ta_cmd_id; + +struct ras_ta_image_header { + uint32_t reserved1[24]; + uint32_t image_version; /* [0x60] Off Chip Firmware Version */ + uint32_t reserved2[39]; +}; + +struct ras_psp_sys_status { + bool initialized; + uint32_t session_id; + void *psp_cmd_mutex; +}; + +struct ras_ta_init_param { + uint8_t poison_mode_en; + uint8_t dgpu_mode; + uint16_t xcc_mask; + uint8_t channel_dis_num; + uint8_t nps_mode; + uint32_t active_umc_mask; +}; + +struct gpu_mem_block { + uint32_t mem_type; + void *mem_bo; + uint64_t mem_mc_addr; + void *mem_cpu_addr; + uint32_t mem_size; + int ref_count; + void *private; +}; + +struct ras_psp_ip_func { + uint32_t (*psp_ras_ring_wptr_get)(struct ras_core_context *ras_core); + int (*psp_ras_ring_wptr_set)(struct ras_core_context *ras_core, uint32_t wptr); +}; + +struct ras_psp_ring { + struct gpu_mem_block ras_ring_gpu_mem; +}; + +struct psp_cmd_resp { + uint32_t status; + uint32_t session_id; +}; + +struct ras_psp_ctx { + void *external_mutex; + struct mutex internal_mutex; + uint64_t in_fence_value; + struct gpu_mem_block psp_cmd_gpu_mem; + struct gpu_mem_block out_fence_gpu_mem; +}; + +struct ras_ta_fw_bin { + uint32_t fw_version; + uint32_t feature_version; + uint32_t bin_size; + uint8_t *bin_addr; +}; + +struct ras_ta_ctx { + bool preload_ras_ta_enabled; + bool ras_ta_initialized; + uint32_t session_id; + uint32_t resp_status; + uint32_t ta_version; + struct mutex ta_mutex; + struct ras_ta_fw_bin fw_bin; + struct ras_ta_init_param init_param; + struct gpu_mem_block fw_gpu_mem; + struct gpu_mem_block cmd_gpu_mem; +}; + +struct ras_psp { + uint32_t psp_ip_version; + struct ras_psp_ring psp_ring; + struct ras_psp_ctx psp_ctx; + struct ras_ta_ctx ta_ctx; + const struct ras_psp_ip_func *ip_func; + const struct ras_psp_sys_func *sys_func; +}; + +struct ras_psp_ta_load { + uint32_t fw_version; + uint32_t feature_version; + uint32_t bin_size; + uint8_t *bin_addr; + uint64_t out_session_id; + uint32_t out_loaded_ta_version; +}; + +struct ras_psp_ta_unload { + uint64_t ras_session_id; +}; + +int ras_psp_sw_init(struct ras_core_context *ras_core); +int ras_psp_sw_fini(struct ras_core_context *ras_core); +int ras_psp_hw_init(struct ras_core_context *ras_core); +int ras_psp_hw_fini(struct ras_core_context *ras_core); +int ras_psp_load_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load); +int ras_psp_unload_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload); +int ras_psp_trigger_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask); +int ras_psp_query_address(struct ras_core_context *ras_core, + struct ras_ta_query_address_input *addr_in, + struct ras_ta_query_address_output *addr_out); +bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core, + enum ras_ta_cmd_id cmd_id); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c new file mode 100644 index 000000000000..626cf39b75ac --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_psp_v13_0.h" + +#define regMP0_SMN_C2PMSG_67 0x0083 +#define regMP0_SMN_C2PMSG_67_BASE_IDX 0 + +static uint32_t ras_psp_v13_0_ring_wptr_get(struct ras_core_context *ras_core) +{ + return RAS_DEV_RREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67); +} + +static int ras_psp_v13_0_ring_wptr_set(struct ras_core_context *ras_core, uint32_t value) +{ + RAS_DEV_WREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67, value); + + return 0; +} + +const struct ras_psp_ip_func ras_psp_v13_0 = { + .psp_ras_ring_wptr_get = ras_psp_v13_0_ring_wptr_get, + .psp_ras_ring_wptr_set = ras_psp_v13_0_ring_wptr_set, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h new file mode 100644 index 000000000000..b705ffe38a12 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_PSP_V13_0_H__ +#define __RAS_PSP_V13_0_H__ +#include "ras_psp.h" + +extern const struct ras_psp_ip_func ras_psp_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h new file mode 100644 index 000000000000..0921e36d3274 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _RAS_TA_IF_H +#define _RAS_TA_IF_H +#include "ras.h" + +#define RAS_TA_HOST_IF_VER 0 + +/* Responses have bit 31 set */ +#define RSP_ID_MASK (1U << 31) +#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) + +/* invalid node instance value */ +#define RAS_TA_INV_NODE 0xffff + +/* RAS related enumerations */ +/**********************************************************/ +enum ras_ta_cmd_id { + RAS_TA_CMD_ID__ENABLE_FEATURES = 0, + RAS_TA_CMD_ID__DISABLE_FEATURES, + RAS_TA_CMD_ID__TRIGGER_ERROR, + RAS_TA_CMD_ID__QUERY_BLOCK_INFO, + RAS_TA_CMD_ID__QUERY_SUB_BLOCK_INFO, + RAS_TA_CMD_ID__QUERY_ADDRESS, + MAX_RAS_TA_CMD_ID +}; + +enum ras_ta_status { + RAS_TA_STATUS__SUCCESS = 0x0000, + RAS_TA_STATUS__RESET_NEEDED = 0xA001, + RAS_TA_STATUS__ERROR_INVALID_PARAMETER = 0xA002, + RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003, + RAS_TA_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004, + RAS_TA_STATUS__ERROR_INJECTION_FAILED = 0xA005, + RAS_TA_STATUS__ERROR_ASD_READ_WRITE = 0xA006, + RAS_TA_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007, + RAS_TA_STATUS__ERROR_TIMEOUT = 0xA008, + RAS_TA_STATUS__ERROR_BLOCK_DISABLED = 0XA009, + RAS_TA_STATUS__ERROR_GENERIC = 0xA00A, + RAS_TA_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B, + RAS_TA_STATUS__ERROR_GET_DEV_INFO = 0xA00C, + RAS_TA_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D, + RAS_TA_STATUS__ERROR_NOT_INITIALIZED = 0xA00E, + RAS_TA_STATUS__ERROR_TEE_INTERNAL = 0xA00F, + RAS_TA_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010, + RAS_TA_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011, + RAS_TA_STATUS__ERROR_RAS_READ_WRITE = 0xA012, + RAS_TA_STATUS__ERROR_NULL_PTR = 0xA013, + RAS_TA_STATUS__ERROR_UNSUPPORTED_IP = 0xA014, + RAS_TA_STATUS__ERROR_PCS_STATE_QUIET = 0xA015, + RAS_TA_STATUS__ERROR_PCS_STATE_ERROR = 0xA016, + RAS_TA_STATUS__ERROR_PCS_STATE_HANG = 0xA017, + RAS_TA_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018, + RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019, + RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A +}; + 
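+/*
+ * Note: a response ID is the originating command ID with bit 31 set,
+ * e.g. RSP_ID(RAS_TA_CMD_ID__TRIGGER_ERROR) evaluates to 0x80000002.
+ */
+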
+enum ras_ta_block { + RAS_TA_BLOCK__UMC = 0, + RAS_TA_BLOCK__SDMA, + RAS_TA_BLOCK__GFX, + RAS_TA_BLOCK__MMHUB, + RAS_TA_BLOCK__ATHUB, + RAS_TA_BLOCK__PCIE_BIF, + RAS_TA_BLOCK__HDP, + RAS_TA_BLOCK__XGMI_WAFL, + RAS_TA_BLOCK__DF, + RAS_TA_BLOCK__SMN, + RAS_TA_BLOCK__SEM, + RAS_TA_BLOCK__MP0, + RAS_TA_BLOCK__MP1, + RAS_TA_BLOCK__FUSE, + RAS_TA_BLOCK__MCA, + RAS_TA_BLOCK__VCN, + RAS_TA_BLOCK__JPEG, + RAS_TA_BLOCK__IH, + RAS_TA_BLOCK__MPIO, + RAS_TA_BLOCK__MMSCH, + RAS_TA_NUM_BLOCK_MAX +}; + +enum ras_ta_mca_block { + RAS_TA_MCA_BLOCK__MP0 = 0, + RAS_TA_MCA_BLOCK__MP1 = 1, + RAS_TA_MCA_BLOCK__MPIO = 2, + RAS_TA_MCA_BLOCK__IOHC = 3, + RAS_TA_MCA_NUM_BLOCK_MAX +}; + +enum ras_ta_error_type { + RAS_TA_ERROR__NONE = 0, + RAS_TA_ERROR__PARITY = 1, + RAS_TA_ERROR__SINGLE_CORRECTABLE = 2, + RAS_TA_ERROR__MULTI_UNCORRECTABLE = 4, + RAS_TA_ERROR__POISON = 8, +}; + +enum ras_ta_address_type { + RAS_TA_MCA_TO_PA, + RAS_TA_PA_TO_MCA, +}; + +enum ras_ta_nps_mode { + RAS_TA_UNKNOWN_MODE = 0, + RAS_TA_NPS1_MODE = 1, + RAS_TA_NPS2_MODE = 2, + RAS_TA_NPS4_MODE = 4, + RAS_TA_NPS8_MODE = 8, +}; + +/* Input/output structures for RAS commands */ +/**********************************************************/ + +struct ras_ta_enable_features_input { + enum ras_ta_block block_id; + enum ras_ta_error_type error_type; +}; + +struct ras_ta_disable_features_input { + enum ras_ta_block block_id; + enum ras_ta_error_type error_type; +}; + +struct ras_ta_trigger_error_input { + /* ras-block. i.e. umc, gfx */ + enum ras_ta_block block_id; + + /* type of error. i.e. single_correctable */ + enum ras_ta_error_type inject_error_type; + + /* mem block. i.e. hbm, sram etc. */ + uint32_t sub_block_index; + + /* explicit address of error */ + uint64_t address; + + /* method if error injection. i.e persistent, coherent etc. 
*/ + uint64_t value; +}; + +struct ras_ta_init_flags { + uint8_t poison_mode_en; + uint8_t dgpu_mode; + uint16_t xcc_mask; + uint8_t channel_dis_num; + uint8_t nps_mode; + uint32_t active_umc_mask; +}; + +struct ras_ta_mca_addr { + uint64_t err_addr; + uint32_t ch_inst; + uint32_t umc_inst; + uint32_t node_inst; + uint32_t socket_id; +}; + +struct ras_ta_phy_addr { + uint64_t pa; + uint32_t bank; + uint32_t channel_idx; +}; + +struct ras_ta_query_address_input { + enum ras_ta_address_type addr_type; + struct ras_ta_mca_addr ma; + struct ras_ta_phy_addr pa; +}; + +struct ras_ta_output_flags { + uint8_t ras_init_success_flag; + uint8_t err_inject_switch_disable_flag; + uint8_t reg_access_failure_flag; +}; + +struct ras_ta_query_address_output { + /* don't use the flags here */ + struct ras_ta_output_flags flags; + struct ras_ta_mca_addr ma; + struct ras_ta_phy_addr pa; +}; + +/* Common input structure for RAS callbacks */ +/**********************************************************/ +union ras_ta_cmd_input { + struct ras_ta_init_flags init_flags; + struct ras_ta_enable_features_input enable_features; + struct ras_ta_disable_features_input disable_features; + struct ras_ta_trigger_error_input trigger_error; + struct ras_ta_query_address_input address; + uint32_t reserve_pad[256]; +}; + +union ras_ta_cmd_output { + struct ras_ta_output_flags flags; + struct ras_ta_query_address_output address; + uint32_t reserve_pad[256]; +}; + +struct ras_ta_cmd { + uint32_t cmd_id; + uint32_t resp_id; + uint32_t ras_status; + uint32_t if_version; + union ras_ta_cmd_input ras_in_message; + union ras_ta_cmd_output ras_out_message; +}; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c new file mode 100644 index 000000000000..4dae64c424a2 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c @@ -0,0 +1,707 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_umc.h" +#include "ras_umc_v12_0.h" + +#define MAX_ECC_NUM_PER_RETIREMENT 16 + +/* bad page timestamp format + * yy[31:27] mm[26:23] day[22:17] hh[16:12] mm[11:6] ss[5:0] + */ +#define EEPROM_TIMESTAMP_MINUTE 6 +#define EEPROM_TIMESTAMP_HOUR 12 +#define EEPROM_TIMESTAMP_DAY 17 +#define EEPROM_TIMESTAMP_MONTH 23 +#define EEPROM_TIMESTAMP_YEAR 27 + +static uint64_t ras_umc_get_eeprom_timestamp(struct ras_core_context *ras_core) +{ + struct ras_time tm = {0}; + uint64_t utc_timestamp = 0; + uint64_t eeprom_timestamp = 0; + + utc_timestamp = ras_core_get_utc_second_timestamp(ras_core); + if (!utc_timestamp) + return utc_timestamp; + + ras_core_convert_timestamp_to_time(ras_core, utc_timestamp, &tm); + + /* the year range is 2000 ~ 2031, set the year if not in the range */ + if (tm.tm_year < 2000) + tm.tm_year = 2000; + if (tm.tm_year > 2031) + tm.tm_year = 2031; + + tm.tm_year -= 2000; + + eeprom_timestamp = tm.tm_sec + (tm.tm_min << EEPROM_TIMESTAMP_MINUTE) + + (tm.tm_hour << EEPROM_TIMESTAMP_HOUR) + + (tm.tm_mday << EEPROM_TIMESTAMP_DAY) + + (tm.tm_mon << EEPROM_TIMESTAMP_MONTH) + + (tm.tm_year << EEPROM_TIMESTAMP_YEAR); + eeprom_timestamp &= 0xffffffff; + + return eeprom_timestamp; +} + +static const struct ras_umc_ip_func *ras_umc_get_ip_func( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 5, 0): + return &ras_umc_func_v12_0; + default: + RAS_DEV_ERR(ras_core->dev, + "UMC ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *in, struct umc_phy_addr *out, + uint32_t nps) +{ + struct ras_ta_query_address_input addr_in; + struct ras_ta_query_address_output addr_out; + int ret; + + if (!in) + return -EINVAL; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + addr_in.ma.err_addr = in->err_addr; + addr_in.ma.ch_inst = in->ch_inst; + addr_in.ma.umc_inst = in->umc_inst; + addr_in.ma.node_inst = in->node_inst; + addr_in.ma.socket_id = in->socket_id; + + addr_in.addr_type = RAS_TA_MCA_TO_PA; + + ret = ras_psp_query_address(ras_core, &addr_in, &addr_out); + if (ret) { + RAS_DEV_WARN(ras_core->dev, + "Failed to query RAS physical address for 0x%llx, ret:%d", + in->err_addr, ret); + return -EREMOTEIO; + } + + if (out) { + out->pa = addr_out.pa.pa; + out->bank = addr_out.pa.bank; + out->channel_idx = addr_out.pa.channel_idx; + } + + return 0; +} + +static int ras_umc_log_ecc(struct ras_core_context *ras_core, + unsigned long idx, void *data) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + int ret; + + mutex_lock(&ras_umc->tree_lock); + ret = radix_tree_insert(&ras_umc->root, idx, data); + if (!ret) + radix_tree_tag_set(&ras_umc->root, idx, UMC_ECC_NEW_DETECTED_TAG); + mutex_unlock(&ras_umc->tree_lock); + + return ret; +} + +int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint64_t buf[8] = {0}; + void **slot; + void *data; + void *iter = buf; + + mutex_lock(&ras_umc->tree_lock); + radix_tree_for_each_slot(slot, &ras_umc->root, iter, 0) { + data = ras_radix_tree_delete_iter(&ras_umc->root, iter); + kfree(data); + } + mutex_unlock(&ras_umc->tree_lock); + + return 0; +} + +static void ras_umc_reserve_eeprom_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint64_t 
page_pfn[16]; + int count = 0, i; + + memset(page_pfn, 0, sizeof(page_pfn)); + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) { + count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core, + record, record->cur_nps, page_pfn, ARRAY_SIZE(page_pfn)); + if (count <= 0) { + RAS_DEV_ERR(ras_core->dev, + "Fail to convert error address! count:%d\n", count); + return; + } + } + + /* Reserve memory */ + for (i = 0; i < count; i++) + ras_core_event_notify(ras_core, + RAS_EVENT_ID__RESERVE_BAD_PAGE, &page_pfn[i]); +} + +/* When gpu reset is ongoing, ecc logging operations will be pended. + */ +int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_bank_ecc_node *ecc_node; + + ecc_node = kzalloc(sizeof(*ecc_node), GFP_KERNEL); + if (!ecc_node) + return -ENOMEM; + + memcpy(&ecc_node->ecc, bank, sizeof(ecc_node->ecc)); + + mutex_lock(&ras_umc->pending_ecc_lock); + list_add_tail(&ecc_node->node, &ras_umc->pending_ecc_list); + mutex_unlock(&ras_umc->pending_ecc_lock); + + return 0; +} + +/* After gpu reset is complete, re-log the pending error banks. + */ +int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_bank_ecc_node *ecc_node, *tmp; + + mutex_lock(&ras_umc->pending_ecc_lock); + list_for_each_entry_safe(ecc_node, + tmp, &ras_umc->pending_ecc_list, node){ + if (ecc_node && !ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) { + list_del(&ecc_node->node); + kfree(ecc_node); + } + } + mutex_unlock(&ras_umc->pending_ecc_lock); + + return 0; +} + +int ras_umc_log_bad_bank(struct ras_core_context *ras_core, struct ras_bank_ecc *bank) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_umc_record umc_rec; + struct eeprom_umc_record *err_rec; + int ret; + + memset(&umc_rec, 0, sizeof(umc_rec)); + + mutex_lock(&ras_umc->bank_log_lock); + ret = ras_umc->ip_func->bank_to_eeprom_record(ras_core, bank, &umc_rec); + if (ret) + goto out; + + err_rec = kzalloc(sizeof(*err_rec), GFP_KERNEL); + if (!err_rec) { + ret = -ENOMEM; + goto out; + } + + memcpy(err_rec, &umc_rec, sizeof(umc_rec)); + ret = ras_umc_log_ecc(ras_core, err_rec->cur_nps_retired_row_pfn, err_rec); + if (ret) { + if (ret == -EEXIST) { + RAS_DEV_INFO(ras_core->dev, "The bad pages have been logged before.\n"); + ret = 0; + } + + kfree(err_rec); + goto out; + } + + ras_umc_reserve_eeprom_record(ras_core, err_rec); + + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__BAD_PAGE_DETECTED, NULL); + +out: + mutex_unlock(&ras_umc->bank_log_lock); + return ret; +} + +static int ras_umc_get_new_records(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, u32 num) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_umc_record *entries[MAX_ECC_NUM_PER_RETIREMENT]; + u32 entry_num = num < MAX_ECC_NUM_PER_RETIREMENT ? 
num : MAX_ECC_NUM_PER_RETIREMENT; + int count = 0; + int new_detected, i; + + mutex_lock(&ras_umc->tree_lock); + new_detected = radix_tree_gang_lookup_tag(&ras_umc->root, (void **)entries, + 0, entry_num, UMC_ECC_NEW_DETECTED_TAG); + for (i = 0; i < new_detected; i++) { + if (!entries[i]) + continue; + + memcpy(&records[i], entries[i], sizeof(struct eeprom_umc_record)); + count++; + radix_tree_tag_clear(&ras_umc->root, + entries[i]->cur_nps_retired_row_pfn, UMC_ECC_NEW_DETECTED_TAG); + } + mutex_unlock(&ras_umc->tree_lock); + + return count; +} + +static bool ras_umc_check_retired_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, bool from_eeprom) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data; + uint32_t nps = 0; + int i, ret; + + if (from_eeprom) { + nps = ras_umc->umc_err_data.umc_nps_mode; + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_record) { + ret = ras_umc->ip_func->eeprom_record_to_nps_record(ras_core, record, nps); + if (ret) + RAS_DEV_WARN(ras_core->dev, + "Failed to adjust eeprom record, ret:%d", ret); + } + return false; + } + + for (i = 0; i < data->count; i++) { + if ((data->bps[i].retired_row_pfn == record->retired_row_pfn) && + (data->bps[i].cur_nps_retired_row_pfn == record->cur_nps_retired_row_pfn)) + return true; + } + + return false; +} + +/* alloc/realloc bps array */ +static int ras_umc_realloc_err_data_space(struct ras_core_context *ras_core, + struct eeprom_store_record *data, int pages) +{ + unsigned int old_space = data->count + data->space_left; + unsigned int new_space = old_space + pages; + unsigned int align_space = ALIGN(new_space, 512); + void *bps = kzalloc(align_space * sizeof(*data->bps), GFP_KERNEL); + + if (!bps) + return -ENOMEM; + + if (data->bps) { + memcpy(bps, data->bps, + data->count * sizeof(*data->bps)); + kfree(data->bps); + } + + data->bps = bps; + data->space_left += align_space - old_space; + return 0; +} + +static int ras_umc_update_eeprom_rom_data(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.rom_data; + + if (!data->space_left && + ras_umc_realloc_err_data_space(ras_core, data, 256)) { + return -ENOMEM; + } + + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + return 0; +} + +static int ras_umc_update_eeprom_ram_data(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data; + uint64_t page_pfn[16]; + int count = 0, j; + + if (!data->space_left && + ras_umc_realloc_err_data_space(ras_core, data, 256)) { + return -ENOMEM; + } + + memset(page_pfn, 0, sizeof(page_pfn)); + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) + count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core, + bps, bps->cur_nps, page_pfn, ARRAY_SIZE(page_pfn)); + + if (count > 0) { + for (j = 0; j < count; j++) { + bps->cur_nps_retired_row_pfn = page_pfn[j]; + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + } + } else { + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + } + + return 0; +} + +/* it deal with vram only. 
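+ * For newly detected errors (from_eeprom == false), records that match an
+ * entry already in the saved rom_data list are skipped, so the same retired
+ * row is not appended to the EEPROM table twice.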
*/ +static int ras_umc_add_bad_pages(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps, + int pages, bool from_eeprom) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_umc_err_data *data = &ras_umc->umc_err_data; + int i, ret = 0; + + if (!bps || pages <= 0) + return 0; + + mutex_lock(&ras_umc->umc_lock); + for (i = 0; i < pages; i++) { + if (ras_umc_check_retired_record(ras_core, &bps[i], from_eeprom)) + continue; + + ret = ras_umc_update_eeprom_rom_data(ras_core, &bps[i]); + if (ret) + goto out; + + if (data->last_retired_pfn == bps[i].cur_nps_retired_row_pfn) + continue; + + data->last_retired_pfn = bps[i].cur_nps_retired_row_pfn; + + if (from_eeprom) + ras_umc_reserve_eeprom_record(ras_core, &bps[i]); + + ret = ras_umc_update_eeprom_ram_data(ras_core, &bps[i]); + if (ret) + goto out; + } +out: + mutex_unlock(&ras_umc->umc_lock); + + return ret; +} + +/* + * read error record array in eeprom and reserve enough space for + * storing new bad pages + */ +int ras_umc_load_bad_pages(struct ras_core_context *ras_core) +{ + struct eeprom_umc_record *bps; + uint32_t ras_num_recs; + int ret; + + ras_num_recs = ras_eeprom_get_record_count(ras_core); + /* no bad page record, skip eeprom access */ + if (!ras_num_recs || + ras_core->ras_eeprom.record_threshold_config == DISABLE_RETIRE_PAGE) + return 0; + + bps = kcalloc(ras_num_recs, sizeof(*bps), GFP_KERNEL); + if (!bps) + return -ENOMEM; + + ret = ras_eeprom_read(ras_core, bps, ras_num_recs); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to load EEPROM table records!"); + } else { + ras_core->ras_umc.umc_err_data.last_retired_pfn = UMC_INV_MEM_PFN; + ret = ras_umc_add_bad_pages(ras_core, bps, ras_num_recs, true); + } + + kfree(bps); + return ret; +} + +/* + * write error record array to eeprom, the function should be + * protected by recovery_lock + * new_cnt: new added UE count, excluding reserved bad pages, can be NULL + */ +static int ras_umc_save_bad_pages(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data; + uint32_t eeprom_record_num; + int save_count; + int ret = 0; + + if (!data->bps) + return 0; + + eeprom_record_num = ras_eeprom_get_record_count(ras_core); + mutex_lock(&ras_umc->umc_lock); + save_count = data->count - eeprom_record_num; + /* only new entries are saved */ + if (save_count > 0) { + if (ras_eeprom_append(ras_core, + &data->bps[eeprom_record_num], + save_count)) { + RAS_DEV_ERR(ras_core->dev, "Failed to save EEPROM table data!"); + ret = -EIO; + goto exit; + } + + RAS_DEV_INFO(ras_core->dev, "Saved %d pages to EEPROM table.\n", save_count); + } + +exit: + mutex_unlock(&ras_umc->umc_lock); + return ret; +} + +int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data) +{ + struct eeprom_umc_record records[MAX_ECC_NUM_PER_RETIREMENT]; + int count, ret; + + memset(records, 0, sizeof(records)); + count = ras_umc_get_new_records(ras_core, records, ARRAY_SIZE(records)); + if (count <= 0) + return -ENODATA; + + ret = ras_umc_add_bad_pages(ras_core, records, count, false); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to add ras bad page!\n"); + return -EINVAL; + } + + ret = ras_umc_save_bad_pages(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to save ras bad page\n"); + return -EINVAL; + } + + return 0; +} + +int ras_umc_sw_init(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + + memset(ras_umc, 0, sizeof(*ras_umc)); + + 
INIT_LIST_HEAD(&ras_umc->pending_ecc_list); + + INIT_RADIX_TREE(&ras_umc->root, GFP_KERNEL); + + mutex_init(&ras_umc->tree_lock); + mutex_init(&ras_umc->pending_ecc_lock); + mutex_init(&ras_umc->umc_lock); + mutex_init(&ras_umc->bank_log_lock); + + return 0; +} + +int ras_umc_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_umc_err_data *umc_err_data = &ras_umc->umc_err_data; + struct ras_bank_ecc_node *ecc_node, *tmp; + + mutex_destroy(&ras_umc->umc_lock); + mutex_destroy(&ras_umc->bank_log_lock); + + if (umc_err_data->rom_data.bps) { + umc_err_data->rom_data.count = 0; + kfree(umc_err_data->rom_data.bps); + umc_err_data->rom_data.bps = NULL; + umc_err_data->rom_data.space_left = 0; + } + + if (umc_err_data->ram_data.bps) { + umc_err_data->ram_data.count = 0; + kfree(umc_err_data->ram_data.bps); + umc_err_data->ram_data.bps = NULL; + umc_err_data->ram_data.space_left = 0; + } + + ras_umc_clear_logged_ecc(ras_core); + + mutex_lock(&ras_umc->pending_ecc_lock); + list_for_each_entry_safe(ecc_node, + tmp, &ras_umc->pending_ecc_list, node){ + list_del(&ecc_node->node); + kfree(ecc_node); + } + mutex_unlock(&ras_umc->pending_ecc_lock); + + mutex_destroy(&ras_umc->tree_lock); + mutex_destroy(&ras_umc->pending_ecc_lock); + + return 0; +} + +int ras_umc_hw_init(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint32_t nps; + + nps = ras_core_get_curr_nps_mode(ras_core); + + if (!nps || (nps >= UMC_MEMORY_PARTITION_MODE_UNKNOWN)) { + RAS_DEV_ERR(ras_core->dev, "Invalid memory NPS mode: %u!\n", nps); + return -ENODATA; + } + + ras_umc->umc_err_data.umc_nps_mode = nps; + + ras_umc->umc_vram_type = ras_core->config->umc_cfg.umc_vram_type; + if (!ras_umc->umc_vram_type) { + RAS_DEV_ERR(ras_core->dev, "Invalid UMC VRAM Type: %u!\n", + ras_umc->umc_vram_type); + return -ENODATA; + } + + ras_umc->umc_ip_version = ras_core->config->umc_ip_version; + ras_umc->ip_func = ras_umc_get_ip_func(ras_core, ras_umc->umc_ip_version); + if (!ras_umc->ip_func) + return -EINVAL; + + return 0; +} + +int ras_umc_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} + +int ras_umc_clean_badpage_data(struct ras_core_context *ras_core) +{ + struct ras_umc_err_data *data = &ras_core->ras_umc.umc_err_data; + + mutex_lock(&ras_core->ras_umc.umc_lock); + + kfree(data->rom_data.bps); + kfree(data->ram_data.bps); + + memset(data, 0, sizeof(*data)); + mutex_unlock(&ras_core->ras_umc.umc_lock); + + return 0; +} + +int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core, + uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr, + enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record) +{ + struct eeprom_umc_record *err_rec = record; + + /* Set bad page pfn and nps mode */ + EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(err_rec, + RAS_ADDR_TO_PFN(cur_nps_addr->pa), cur_nps); + + err_rec->address = err_addr; + err_rec->ts = ras_umc_get_eeprom_timestamp(ras_core); + err_rec->err_type = RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = cur_nps_addr->channel_idx; + err_rec->mcumc_id = umc_inst; + err_rec->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(cur_nps_addr->pa); + err_rec->cur_nps_bank = cur_nps_addr->bank; + err_rec->cur_nps = cur_nps; + return 0; +} + +int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core) +{ + struct ras_umc_err_data *err_data = &ras_core->ras_umc.umc_err_data; + + return err_data->rom_data.count; +} + +int ras_umc_get_badpage_count(struct 
ras_core_context *ras_core) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data; + + return data->count; +} + +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data; + + if (index >= data->count) + return -EINVAL; + + memcpy(record, &data->bps[index], sizeof(struct eeprom_umc_record)); + return 0; +} + +bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data; + uint64_t page_pfn = RAS_ADDR_TO_PFN(addr); + int i, ret = false; + + mutex_lock(&ras_umc->umc_lock); + for (i = 0; i < data->count; i++) { + if (data->bps[i].cur_nps_retired_row_pfn == page_pfn) { + ret = true; + break; + } + } + mutex_unlock(&ras_umc->umc_lock); + + return ret; +} + +int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + int ret = 0; + + if (bank_to_pa) + ret = ras_umc->ip_func->bank_to_soc_pa(ras_core, *bank_addr, soc_pa); + else + ret = ras_umc->ip_func->soc_pa_to_bank(ras_core, *soc_pa, bank_addr); + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h new file mode 100644 index 000000000000..7d9e779d8c4c --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_UMC_H__ +#define __RAS_UMC_H__ +#include "ras.h" +#include "ras_eeprom.h" +#include "ras_cmd.h" + +#define UMC_VRAM_TYPE_UNKNOWN 0 +#define UMC_VRAM_TYPE_GDDR1 1 +#define UMC_VRAM_TYPE_DDR2 2 +#define UMC_VRAM_TYPE_GDDR3 3 +#define UMC_VRAM_TYPE_GDDR4 4 +#define UMC_VRAM_TYPE_GDDR5 5 +#define UMC_VRAM_TYPE_HBM 6 +#define UMC_VRAM_TYPE_DDR3 7 +#define UMC_VRAM_TYPE_DDR4 8 +#define UMC_VRAM_TYPE_GDDR6 9 +#define UMC_VRAM_TYPE_DDR5 10 +#define UMC_VRAM_TYPE_LPDDR4 11 +#define UMC_VRAM_TYPE_LPDDR5 12 +#define UMC_VRAM_TYPE_HBM3E 13 + +#define UMC_ECC_NEW_DETECTED_TAG 0x1 +#define UMC_INV_MEM_PFN (0xFFFFFFFFFFFFFFFF) + +/* three column bits and one row bit in MCA address flip + * in bad page retirement + */ +#define UMC_PA_FLIP_BITS_NUM 4 + +enum umc_memory_partition_mode { + UMC_MEMORY_PARTITION_MODE_NONE = 0, + UMC_MEMORY_PARTITION_MODE_NPS1 = 1, + UMC_MEMORY_PARTITION_MODE_NPS2 = 2, + UMC_MEMORY_PARTITION_MODE_NPS3 = 3, + UMC_MEMORY_PARTITION_MODE_NPS4 = 4, + UMC_MEMORY_PARTITION_MODE_NPS6 = 6, + UMC_MEMORY_PARTITION_MODE_NPS8 = 8, + UMC_MEMORY_PARTITION_MODE_UNKNOWN +}; + +struct ras_core_context; +struct ras_bank_ecc; + +struct umc_flip_bits { + uint32_t flip_bits_in_pa[UMC_PA_FLIP_BITS_NUM]; + uint32_t flip_row_bit; + uint32_t r13_in_pa; + uint32_t bit_num; +}; + +struct umc_mca_addr { + uint64_t err_addr; + uint32_t ch_inst; + uint32_t umc_inst; + uint32_t node_inst; + uint32_t socket_id; +}; + +struct umc_phy_addr { + uint64_t pa; + uint32_t bank; + uint32_t channel_idx; +}; + +struct umc_bank_addr { + uint32_t stack_id; /* SID */ + uint32_t bank_group; + uint32_t bank; + uint32_t row; + uint32_t column; + uint32_t channel; + uint32_t subchannel; /* Also called Pseudochannel (PC) */ +}; + +struct ras_umc_ip_func { + int (*bank_to_eeprom_record)(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct eeprom_umc_record *record); + int (*eeprom_record_to_nps_record)(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps); + int (*eeprom_record_to_nps_pages)(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num); + int (*bank_to_soc_pa)(struct ras_core_context *ras_core, + struct umc_bank_addr bank_addr, uint64_t *soc_pa); + int (*soc_pa_to_bank)(struct ras_core_context *ras_core, + uint64_t soc_pa, struct umc_bank_addr *bank_addr); +}; + +struct eeprom_store_record { + /* point to data records array */ + struct eeprom_umc_record *bps; + /* the count of entries */ + int count; + /* the space can place new entries */ + int space_left; +}; + +struct ras_umc_err_data { + struct eeprom_store_record rom_data; + struct eeprom_store_record ram_data; + enum umc_memory_partition_mode umc_nps_mode; + uint64_t last_retired_pfn; +}; + +struct ras_umc { + u32 umc_ip_version; + u32 umc_vram_type; + const struct ras_umc_ip_func *ip_func; + struct radix_tree_root root; + struct mutex tree_lock; + struct mutex umc_lock; + struct mutex bank_log_lock; + struct mutex pending_ecc_lock; + struct ras_umc_err_data umc_err_data; + struct list_head pending_ecc_list; +}; + +int ras_umc_sw_init(struct ras_core_context *ras); +int ras_umc_sw_fini(struct ras_core_context *ras); +int ras_umc_hw_init(struct ras_core_context *ras); +int ras_umc_hw_fini(struct ras_core_context *ras); +int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *in, struct umc_phy_addr *out, + uint32_t nps); +int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void 
*data); +int ras_umc_log_bad_bank(struct ras_core_context *ras, struct ras_bank_ecc *bank); +int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank); +int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core); +int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core); +int ras_umc_load_bad_pages(struct ras_core_context *ras_core); +int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core); +int ras_umc_clean_badpage_data(struct ras_core_context *ras_core); +int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core, + uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr, + enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record); + +int ras_umc_get_badpage_count(struct ras_core_context *ras_core); +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record); +bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr); +int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c new file mode 100644 index 000000000000..5d9a11c17a86 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_umc.h" +#include "ras_core_status.h" +#include "ras_umc_v12_0.h" + +#define NumDieInterleaved 4 + +static const uint32_t umc_v12_0_channel_idx_tbl[] + [UMC_V12_0_UMC_INSTANCE_NUM][UMC_V12_0_CHANNEL_INSTANCE_NUM] = { + {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12}, + {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}}, + {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32}, + {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}}, + {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64}, + {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}}, + {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108}, + {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}} +}; + +/* mapping of MCA error address to normalized address */ +static const uint32_t umc_v12_0_ma2na_mapping[] = { + 0, 5, 6, 8, 9, 14, 12, 13, + 10, 11, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 24, 7, 29, 30, +}; + +static bool umc_v12_0_bit_wise_xor(uint32_t val) +{ + bool result = 0; + int i; + + for (i = 0; i < 32; i++) + result = result ^ ((val >> i) & 0x1); + + return result; +} + +static void __get_nps_pa_flip_bits(struct ras_core_context *ras_core, + enum umc_memory_partition_mode nps, + struct umc_flip_bits *flip_bits) +{ + uint32_t vram_type = ras_core->ras_umc.umc_vram_type; + + /* default setting */ + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT; + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT; + flip_bits->flip_row_bit = 13; + flip_bits->bit_num = 4; + flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) { + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT; + flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT; + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) { + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT; + flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT; + } + + switch (vram_type) { + case UMC_VRAM_TYPE_HBM: + /* other nps modes are taken as nps1 */ + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT; + + break; + case UMC_VRAM_TYPE_HBM3E: + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT; + flip_bits->flip_row_bit = 12; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT; + + break; + default: + RAS_DEV_WARN(ras_core->dev, + "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n"); + break; + } +} + +static uint64_t convert_nps_pa_to_row_pa(struct ras_core_context *ras_core, + uint64_t pa, enum umc_memory_partition_mode nps, bool zero_pfn_ok) +{ + struct umc_flip_bits flip_bits = {0}; + uint64_t row_pa; + int i; + + __get_nps_pa_flip_bits(ras_core, nps, &flip_bits); + + row_pa = pa; + /* clear loop bits in soc physical address */ + for (i = 0; i < flip_bits.bit_num; i++) + row_pa &= 
~BIT_ULL(flip_bits.flip_bits_in_pa[i]); + + if (!zero_pfn_ok && !RAS_ADDR_TO_PFN(row_pa)) + row_pa |= BIT_ULL(flip_bits.flip_bits_in_pa[2]); + + return row_pa; +} + +static int lookup_bad_pages_in_a_row(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num, + uint64_t seq_no, bool dump) +{ + uint32_t col, col_lower, row, row_lower, idx, row_high; + uint64_t soc_pa, row_pa, column, err_addr; + uint64_t retired_addr = RAS_PFN_TO_ADDR(record->cur_nps_retired_row_pfn); + struct umc_flip_bits flip_bits = {0}; + uint32_t retire_unit; + uint32_t i; + + __get_nps_pa_flip_bits(ras_core, nps, &flip_bits); + + row_pa = convert_nps_pa_to_row_pa(ras_core, retired_addr, nps, true); + + err_addr = record->address; + /* get column bit 0 and 1 in mca address */ + col_lower = (err_addr >> 1) & 0x3ULL; + /* MA_R13_BIT will be handled later */ + row_lower = (err_addr >> UMC_V12_0_MCA_R0_BIT) & 0x1fffULL; + row_lower &= ~BIT_ULL(flip_bits.flip_row_bit); + + if (ras_core->ras_gfx.gfx_ip_version >= IP_VERSION(9, 5, 0)) { + row_high = (row_pa >> flip_bits.r13_in_pa) & 0x3ULL; + /* it's 2.25GB in each channel, from MCA address to PA + * [R14 R13] is converted if the two bits value are 0x3, + * get them from PA instead of MCA address. + */ + row_lower |= (row_high << 13); + } + + idx = 0; + row = 0; + retire_unit = 0x1 << flip_bits.bit_num; + /* loop for all possibilities of retire bits */ + for (column = 0; column < retire_unit; column++) { + soc_pa = row_pa; + for (i = 0; i < flip_bits.bit_num; i++) + soc_pa |= (((column >> i) & 0x1ULL) << flip_bits.flip_bits_in_pa[i]); + + col = ((column & 0x7) << 2) | col_lower; + + /* add row bit 13 */ + if (flip_bits.bit_num == UMC_PA_FLIP_BITS_NUM) + row = ((column >> 3) << flip_bits.flip_row_bit) | row_lower; + + if (dump) + RAS_DEV_INFO(ras_core->dev, + "{%llu} Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", + seq_no, soc_pa, row, col, + record->cur_nps_bank, record->mem_channel); + + + if (pfns && (idx < num)) + pfns[idx++] = RAS_ADDR_TO_PFN(soc_pa); + } + + return idx; +} + +static int umc_v12_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out, + uint32_t nps) +{ + uint32_t i, na_shift; + uint64_t soc_pa, na, na_nps; + uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row; + uint32_t bank0, bank1, bank2, bank3, bank; + uint32_t ch_inst = addr_in->ch_inst; + uint32_t umc_inst = addr_in->umc_inst; + uint32_t node_inst = addr_in->node_inst; + uint32_t socket_id = addr_in->socket_id; + uint32_t channel_index; + uint64_t err_addr = addr_in->err_addr; + + if (node_inst != UMC_INV_AID_NODE) { + if (ch_inst >= UMC_V12_0_CHANNEL_INSTANCE_NUM || + umc_inst >= UMC_V12_0_UMC_INSTANCE_NUM || + node_inst >= UMC_V12_0_AID_NUM_MAX || + socket_id >= UMC_V12_0_SOCKET_NUM_MAX) + return -EINVAL; + } else { + if (socket_id >= UMC_V12_0_SOCKET_NUM_MAX || + ch_inst >= UMC_V12_0_TOTAL_CHANNEL_NUM) + return -EINVAL; + } + + bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL; + bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL; + bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL; + bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL; + col = (err_addr >> 1) & 0x1fULL; + row = (err_addr >> 10) & 0x3fffULL; + + /* apply bank hash algorithm */ + bank0 = + bank_hash0 ^ (UMC_V12_0_XOR_EN0 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0)))); + bank1 = + 
bank_hash1 ^ (UMC_V12_0_XOR_EN1 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1)))); + bank2 = + bank_hash2 ^ (UMC_V12_0_XOR_EN2 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2)))); + bank3 = + bank_hash3 ^ (UMC_V12_0_XOR_EN3 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3)))); + + bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3); + err_addr &= ~0x3c0ULL; + err_addr |= (bank << UMC_V12_0_MCA_B0_BIT); + + na_nps = 0x0; + /* convert mca error address to normalized address */ + for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++) + na_nps |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i]; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS1) + na_shift = 8; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + na_shift = 9; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + na_shift = 10; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) + na_shift = 11; + else + return -EINVAL; + + na = ((na_nps >> na_shift) << 8) | (na_nps & 0xff); + + if (node_inst != UMC_INV_AID_NODE) + channel_index = + umc_v12_0_channel_idx_tbl[node_inst][umc_inst][ch_inst]; + else { + channel_index = ch_inst; + node_inst = channel_index / + (UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM); + } + + /* translate umc channel address to soc pa, 3 parts are included */ + soc_pa = ADDR_OF_32KB_BLOCK(na) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(na); + + /* calc channel hash based on absolute address */ + soc_pa += socket_id * SOCKET_LFB_SIZE; + /* the umc channel bits are not original values, they are hashed */ + UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa); + /* restore pa */ + soc_pa -= socket_id * SOCKET_LFB_SIZE; + + /* get some channel bits from na_nps directly and + * add nps section offset + */ + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) { + soc_pa &= ~(0x1ULL << UMC_V12_0_PA_CH5_BIT); + soc_pa |= ((na_nps & 0x100) << 5); + soc_pa += (node_inst >> 1) * (SOCKET_LFB_SIZE >> 1); + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) { + soc_pa &= ~(0x3ULL << UMC_V12_0_PA_CH4_BIT); + soc_pa |= ((na_nps & 0x300) << 4); + soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2); + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) { + soc_pa &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); + soc_pa |= ((na_nps & 0x700) << 4); + soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2) + + (channel_index >> 4) * (SOCKET_LFB_SIZE >> 3); + } + + addr_out->pa = soc_pa; + addr_out->bank = bank; + addr_out->channel_idx = channel_index; + + return 0; +} + +static int convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out, + uint32_t nps) +{ + int ret; + + if (ras_psp_check_supported_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS)) + ret = ras_umc_psp_convert_ma_to_pa(ras_core, + addr_in, addr_out, nps); + else + ret = umc_v12_convert_ma_to_pa(ras_core, + addr_in, addr_out, nps); + + return ret; +} + +static int convert_bank_to_nps_addr(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct umc_phy_addr *pa_addr, uint32_t nps) +{ + struct umc_mca_addr addr_in; + struct umc_phy_addr addr_out; + int ret; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + addr_in.err_addr = ACA_ADDR_2_ERR_ADDR(bank->addr); + addr_in.ch_inst = ACA_IPID_2_UMC_CH(bank->ipid); + addr_in.umc_inst = ACA_IPID_2_UMC_INST(bank->ipid); + addr_in.node_inst = 
ACA_IPID_2_DIE_ID(bank->ipid); + addr_in.socket_id = ACA_IPID_2_SOCKET_ID(bank->ipid); + + ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps); + if (!ret) { + pa_addr->pa = + convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false); + pa_addr->channel_idx = addr_out.channel_idx; + pa_addr->bank = addr_out.bank; + } + + return ret; +} + +static int umc_v12_0_bank_to_eeprom_record(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct eeprom_umc_record *record) +{ + struct umc_phy_addr nps_addr; + int ret; + + memset(&nps_addr, 0, sizeof(nps_addr)); + + ret = convert_bank_to_nps_addr(ras_core, bank, + &nps_addr, bank->nps); + if (ret) + return ret; + + ras_umc_fill_eeprom_record(ras_core, + ACA_ADDR_2_ERR_ADDR(bank->addr), ACA_IPID_2_UMC_INST(bank->ipid), + &nps_addr, bank->nps, record); + + lookup_bad_pages_in_a_row(ras_core, record, + bank->nps, NULL, 0, bank->seq_no, true); + + return 0; +} + +static int convert_eeprom_record_to_nps_addr(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint64_t *pa, uint32_t nps) +{ + struct device_system_info dev_info = {0}; + struct umc_mca_addr addr_in; + struct umc_phy_addr addr_out; + int ret; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + ras_core_get_device_system_info(ras_core, &dev_info); + + addr_in.err_addr = record->address; + addr_in.ch_inst = record->mem_channel; + addr_in.umc_inst = record->mcumc_id; + addr_in.node_inst = UMC_INV_AID_NODE; + addr_in.socket_id = dev_info.socket_id; + + ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps); + if (ret) + return ret; + + *pa = convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false); + + return 0; +} + +static int umc_v12_0_eeprom_record_to_nps_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps) +{ + uint64_t pa = 0; + int ret = 0; + + if (nps == EEPROM_RECORD_UMC_NPS_MODE(record)) { + record->cur_nps_retired_row_pfn = EEPROM_RECORD_UMC_ADDR_PFN(record); + } else { + ret = convert_eeprom_record_to_nps_addr(ras_core, + record, &pa, nps); + if (!ret) + record->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(pa); + } + + record->cur_nps = nps; + + return ret; +} + +static int umc_v12_0_eeprom_record_to_nps_pages(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num) +{ + return lookup_bad_pages_in_a_row(ras_core, + record, nps, pfns, num, 0, false); +} + +static int umc_12_0_soc_pa_to_bank(struct ras_core_context *ras_core, + uint64_t soc_pa, + struct umc_bank_addr *bank_addr) +{ + + int channel_hashed = 0; + int channel_real = 0; + int channel_reversed = 0; + int i = 0; + + bank_addr->stack_id = UMC_V12_0_SOC_PA_TO_SID(soc_pa); + bank_addr->bank_group = 0; /* This is a combination of SID & Bank. Needed?? */ + bank_addr->bank = UMC_V12_0_SOC_PA_TO_BANK(soc_pa); + bank_addr->row = UMC_V12_0_SOC_PA_TO_ROW(soc_pa); + bank_addr->column = UMC_V12_0_SOC_PA_TO_COL(soc_pa); + + /* Channel bits 4-6 are hashed. 
Bruteforce reverse the hash */ + channel_hashed = (soc_pa >> UMC_V12_0_PA_CH4_BIT) & 0x7; + + for (i = 0; i < 8; i++) { + channel_reversed = 0; + channel_reversed |= UMC_V12_0_CHANNEL_HASH_CH4((i << 4), soc_pa); + channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH5((i << 4), soc_pa) << 1); + channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH6((i << 4), soc_pa) << 2); + if (channel_reversed == channel_hashed) + channel_real = ((i << 4)) | ((soc_pa >> UMC_V12_0_PA_CH0_BIT) & 0xf); + } + + bank_addr->channel = channel_real; + bank_addr->subchannel = UMC_V12_0_SOC_PA_TO_PC(soc_pa); + + return 0; +} + +static int umc_12_0_bank_to_soc_pa(struct ras_core_context *ras_core, + struct umc_bank_addr bank_addr, + uint64_t *soc_pa) +{ + uint64_t na = 0; + uint64_t tmp_pa = 0; + *soc_pa = 0; + + tmp_pa |= UMC_V12_0_SOC_SID_TO_PA(bank_addr.stack_id); + tmp_pa |= UMC_V12_0_SOC_BANK_TO_PA(bank_addr.bank); + tmp_pa |= UMC_V12_0_SOC_ROW_TO_PA(bank_addr.row); + tmp_pa |= UMC_V12_0_SOC_COL_TO_PA(bank_addr.column); + tmp_pa |= UMC_V12_0_SOC_CH_TO_PA(bank_addr.channel); + tmp_pa |= UMC_V12_0_SOC_PC_TO_PA(bank_addr.subchannel); + + /* Get the NA */ + na = ((tmp_pa >> UMC_V12_0_PA_C2_BIT) << UMC_V12_0_NA_C2_BIT); + na |= tmp_pa & 0xff; + + /* translate umc channel address to soc pa, 3 parts are included */ + tmp_pa = ADDR_OF_32KB_BLOCK(na) | + ADDR_OF_256B_BLOCK(bank_addr.channel) | + OFFSET_IN_256B_BLOCK(na); + + /* the umc channel bits are not original values, they are hashed */ + UMC_V12_0_SET_CHANNEL_HASH(bank_addr.channel, tmp_pa); + + *soc_pa = tmp_pa; + + return 0; +} + +const struct ras_umc_ip_func ras_umc_func_v12_0 = { + .bank_to_eeprom_record = umc_v12_0_bank_to_eeprom_record, + .eeprom_record_to_nps_record = umc_v12_0_eeprom_record_to_nps_record, + .eeprom_record_to_nps_pages = umc_v12_0_eeprom_record_to_nps_pages, + .bank_to_soc_pa = umc_12_0_bank_to_soc_pa, + .soc_pa_to_bank = umc_12_0_soc_pa_to_bank, +}; + diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h new file mode 100644 index 000000000000..8a35ad856165 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_UMC_V12_0_H__ +#define __RAS_UMC_V12_0_H__ +#include "ras.h" + +/* MCA_UMC_UMC0_MCUMC_ADDRT0 */ +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xFF00000000000000L + +/* MCMP1_IPIDT0 */ +#define MCMP1_IPIDT0__InstanceIdLo__SHIFT 0x0 +#define MCMP1_IPIDT0__HardwareID__SHIFT 0x20 +#define MCMP1_IPIDT0__InstanceIdHi__SHIFT 0x2c +#define MCMP1_IPIDT0__McaType__SHIFT 0x30 + +#define MCMP1_IPIDT0__InstanceIdLo_MASK 0x00000000FFFFFFFFL +#define MCMP1_IPIDT0__HardwareID_MASK 0x00000FFF00000000L +#define MCMP1_IPIDT0__InstanceIdHi_MASK 0x0000F00000000000L +#define MCMP1_IPIDT0__McaType_MASK 0xFFFF000000000000L + +/* number of umc channel instance with memory map register access */ +#define UMC_V12_0_CHANNEL_INSTANCE_NUM 8 +/* number of umc instance with memory map register access */ +#define UMC_V12_0_UMC_INSTANCE_NUM 4 + +/* one piece of normalized address is mapped to 8 pieces of physical address */ +#define UMC_V12_0_NA_MAP_PA_NUM 8 + +/* bank bits in MCA error address */ +#define UMC_V12_0_MCA_B0_BIT 6 +#define UMC_V12_0_MCA_B1_BIT 7 +#define UMC_V12_0_MCA_B2_BIT 8 +#define UMC_V12_0_MCA_B3_BIT 9 + +/* row bits in MCA address */ +#define UMC_V12_0_MCA_R0_BIT 10 + +/* Stack ID bits in SOC physical address */ +#define UMC_V12_0_PA_SID1_BIT 37 +#define UMC_V12_0_PA_SID0_BIT 36 + +/* bank bits in SOC physical address */ +#define UMC_V12_0_PA_B3_BIT 18 +#define UMC_V12_0_PA_B2_BIT 17 +#define UMC_V12_0_PA_B1_BIT 20 +#define UMC_V12_0_PA_B0_BIT 19 + +/* row bits in SOC physical address */ +#define UMC_V12_0_PA_R13_BIT 35 +#define UMC_V12_0_PA_R12_BIT 34 +#define UMC_V12_0_PA_R11_BIT 33 +#define UMC_V12_0_PA_R10_BIT 32 +#define UMC_V12_0_PA_R9_BIT 31 +#define UMC_V12_0_PA_R8_BIT 30 +#define UMC_V12_0_PA_R7_BIT 29 +#define UMC_V12_0_PA_R6_BIT 28 +#define UMC_V12_0_PA_R5_BIT 27 +#define UMC_V12_0_PA_R4_BIT 26 +#define UMC_V12_0_PA_R3_BIT 25 +#define UMC_V12_0_PA_R2_BIT 24 +#define UMC_V12_0_PA_R1_BIT 23 +#define UMC_V12_0_PA_R0_BIT 22 + +/* column bits in SOC physical address */ +#define UMC_V12_0_PA_C4_BIT 21 +#define UMC_V12_0_PA_C3_BIT 16 +#define UMC_V12_0_PA_C2_BIT 15 +#define UMC_V12_0_PA_C1_BIT 6 +#define UMC_V12_0_PA_C0_BIT 5 + +/* channel index bits in SOC physical address */ +#define UMC_V12_0_PA_CH6_BIT 14 +#define UMC_V12_0_PA_CH5_BIT 13 +#define UMC_V12_0_PA_CH4_BIT 12 +#define UMC_V12_0_PA_CH3_BIT 11 +#define UMC_V12_0_PA_CH2_BIT 10 +#define UMC_V12_0_PA_CH1_BIT 9 +#define UMC_V12_0_PA_CH0_BIT 8 + +/* Pseudochannel index bits in SOC physical address */ +#define UMC_V12_0_PA_PC0_BIT 7 + +#define UMC_V12_0_NA_C2_BIT 8 + +#define UMC_V12_0_SOC_PA_TO_SID(pa) \ + ((((pa >> UMC_V12_0_PA_SID0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_SID1_BIT) & 0x1ULL) << 1ULL)) + +#define UMC_V12_0_SOC_PA_TO_BANK(pa) \ + ((((pa >> UMC_V12_0_PA_B0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_B1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_B2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_B3_BIT) & 0x1ULL) << 3ULL)) + +#define UMC_V12_0_SOC_PA_TO_ROW(pa) \ + ((((pa >> UMC_V12_0_PA_R0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_R1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_R2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_R3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_R4_BIT) & 0x1ULL) << 4ULL) | \ + (((pa >> UMC_V12_0_PA_R5_BIT) & 0x1ULL) << 5ULL) 
| \ + (((pa >> UMC_V12_0_PA_R6_BIT) & 0x1ULL) << 6ULL) | \ + (((pa >> UMC_V12_0_PA_R7_BIT) & 0x1ULL) << 7ULL) | \ + (((pa >> UMC_V12_0_PA_R8_BIT) & 0x1ULL) << 8ULL) | \ + (((pa >> UMC_V12_0_PA_R9_BIT) & 0x1ULL) << 9ULL) | \ + (((pa >> UMC_V12_0_PA_R10_BIT) & 0x1ULL) << 10ULL) | \ + (((pa >> UMC_V12_0_PA_R11_BIT) & 0x1ULL) << 11ULL) | \ + (((pa >> UMC_V12_0_PA_R12_BIT) & 0x1ULL) << 12ULL) | \ + (((pa >> UMC_V12_0_PA_R13_BIT) & 0x1ULL) << 13ULL)) + +#define UMC_V12_0_SOC_PA_TO_COL(pa) \ + ((((pa >> UMC_V12_0_PA_C0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_C1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_C2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_C3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_C4_BIT) & 0x1ULL) << 4ULL)) + +#define UMC_V12_0_SOC_PA_TO_CH(pa) \ + ((((pa >> UMC_V12_0_PA_CH0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_CH1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_CH2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_CH3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_CH4_BIT) & 0x1ULL) << 4ULL) | \ + (((pa >> UMC_V12_0_PA_CH5_BIT) & 0x1ULL) << 5ULL) | \ + (((pa >> UMC_V12_0_PA_CH6_BIT) & 0x1ULL) << 6ULL)) + +#define UMC_V12_0_SOC_PA_TO_PC(pa) (((pa >> UMC_V12_0_PA_PC0_BIT) & 0x1ULL) << 0ULL) + +#define UMC_V12_0_SOC_SID_TO_PA(sid) \ + ((((sid >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_SID0_BIT) | \ + (((sid >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_SID1_BIT)) + +#define UMC_V12_0_SOC_BANK_TO_PA(bank) \ + ((((bank >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_B0_BIT) | \ + (((bank >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_B1_BIT) | \ + (((bank >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_B2_BIT) | \ + (((bank >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_B3_BIT)) + +#define UMC_V12_0_SOC_ROW_TO_PA(row) \ + ((((row >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_R0_BIT) | \ + (((row >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_R1_BIT) | \ + (((row >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_R2_BIT) | \ + (((row >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_R3_BIT) | \ + (((row >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_R4_BIT) | \ + (((row >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_R5_BIT) | \ + (((row >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_R6_BIT) | \ + (((row >> 7ULL) & 0x1ULL) << UMC_V12_0_PA_R7_BIT) | \ + (((row >> 8ULL) & 0x1ULL) << UMC_V12_0_PA_R8_BIT) | \ + (((row >> 9ULL) & 0x1ULL) << UMC_V12_0_PA_R9_BIT) | \ + (((row >> 10ULL) & 0x1ULL) << UMC_V12_0_PA_R10_BIT) | \ + (((row >> 11ULL) & 0x1ULL) << UMC_V12_0_PA_R11_BIT) | \ + (((row >> 12ULL) & 0x1ULL) << UMC_V12_0_PA_R12_BIT) | \ + (((row >> 13ULL) & 0x1ULL) << UMC_V12_0_PA_R13_BIT)) + +#define UMC_V12_0_SOC_COL_TO_PA(col) \ + ((((col >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_C0_BIT) | \ + (((col >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_C1_BIT) | \ + (((col >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_C2_BIT) | \ + (((col >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_C3_BIT) | \ + (((col >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_C4_BIT)) + +#define UMC_V12_0_SOC_CH_TO_PA(ch) \ + ((((ch >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_CH0_BIT) | \ + (((ch >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_CH1_BIT) | \ + (((ch >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_CH2_BIT) | \ + (((ch >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_CH3_BIT) | \ + (((ch >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_CH4_BIT) | \ + (((ch >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_CH5_BIT) | \ + (((ch >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_CH6_BIT)) + +#define UMC_V12_0_SOC_PC_TO_PA(pc) (((pc >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_PC0_BIT) + +/* bank hash settings */ +#define UMC_V12_0_XOR_EN0 1 +#define UMC_V12_0_XOR_EN1 1 +#define UMC_V12_0_XOR_EN2 1 +#define UMC_V12_0_XOR_EN3 1 +#define UMC_V12_0_COL_XOR0 0x0 
+#define UMC_V12_0_COL_XOR1 0x0
+#define UMC_V12_0_COL_XOR2 0x800
+#define UMC_V12_0_COL_XOR3 0x1000
+#define UMC_V12_0_ROW_XOR0 0x11111
+#define UMC_V12_0_ROW_XOR1 0x22222
+#define UMC_V12_0_ROW_XOR2 0x4444
+#define UMC_V12_0_ROW_XOR3 0x8888
+
+/* channel hash settings */
+#define UMC_V12_0_HASH_4K 0
+#define UMC_V12_0_HASH_64K 1
+#define UMC_V12_0_HASH_2M 1
+#define UMC_V12_0_HASH_1G 1
+#define UMC_V12_0_HASH_1T 1
+
+/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA),
+ * hash bit is only effective when related setting is enabled
+ */
+#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \
+ (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \
+ (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \
+ (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \
+ (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \
+ (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
+ } while (0)
+
+
+/*
+ * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
+ * is the index of 4KB block
+ */
+#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
+/*
+ * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
+ * is the index of 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/*
+ * (addr / 256) * 32768, the higher 26 bits in ErrorAddr
+ * is the index of 32KB block
+ */
+#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+
+
+#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
+ ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_R13_BIT)))
+
+#define ACA_IPID_HI_2_UMC_AID(_ipid_hi) (((_ipid_hi) >> 2) & 0x3)
+#define ACA_IPID_LO_2_UMC_CH(_ipid_lo) \
+ (((((_ipid_lo) >> 20) & 0x1) * 4) + (((_ipid_lo) >> 12) & 0xF))
+#define ACA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+#define ACA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03)
+#define ACA_IPID_2_UMC_CH(ipid) \
+ (ACA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_UMC_INST(ipid) \
+ (ACA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_SOCKET_ID(ipid) \
+ (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
+ (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+
+#define ACA_ADDR_2_ERR_ADDR(addr) \
+ REG_GET_FIELD(addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr)
+
+/* R13 bit shift should be considered,
double the number */ +#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2) + + +/* C2, C3, C4, R13, four MCA bits are looped in page retirement */ +#define UMC_V12_0_RETIRE_LOOP_BITS 4 + +/* invalid node instance value */ +#define UMC_INV_AID_NODE 0xffff + +#define UMC_V12_0_AID_NUM_MAX 4 +#define UMC_V12_0_SOCKET_NUM_MAX 8 + +#define UMC_V12_0_TOTAL_CHANNEL_NUM \ + (UMC_V12_0_AID_NUM_MAX * UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM) + +/* one device has 192GB HBM */ +#define SOCKET_LFB_SIZE 0x3000000000ULL + +extern const struct ras_umc_ip_func ras_umc_func_v12_0; + +int ras_umc_get_badpage_count(struct ras_core_context *ras_core); +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record); +#endif + diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 2ad33559a33a..5a66948ffd24 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -111,6 +111,7 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc, static int komeda_crtc_prepare(struct komeda_crtc *kcrtc) { + struct drm_device *drm = kcrtc->base.dev; struct komeda_dev *mdev = kcrtc->base.dev->dev_private; struct komeda_pipeline *master = kcrtc->master; struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state); @@ -128,8 +129,8 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) err = mdev->funcs->change_opmode(mdev, new_mode); if (err) { - DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,", - mdev->dpmode, new_mode); + drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,", + mdev->dpmode, new_mode); goto unlock; } @@ -142,18 +143,18 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) if (new_mode != KOMEDA_MODE_DUAL_DISP) { err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st)); if (err) - DRM_ERROR("failed to set aclk.\n"); + drm_err(drm, "failed to set aclk.\n"); err = clk_prepare_enable(mdev->aclk); if (err) - DRM_ERROR("failed to enable aclk.\n"); + drm_err(drm, "failed to enable aclk.\n"); } err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000); if (err) - DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id); + drm_err(drm, "failed to set pxlclk for pipe%d\n", master->id); err = clk_prepare_enable(master->pxlclk); if (err) - DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id); + drm_err(drm, "failed to enable pxl clk for pipe%d.\n", master->id); unlock: mutex_unlock(&mdev->lock); @@ -164,6 +165,7 @@ unlock: static int komeda_crtc_unprepare(struct komeda_crtc *kcrtc) { + struct drm_device *drm = kcrtc->base.dev; struct komeda_dev *mdev = kcrtc->base.dev->dev_private; struct komeda_pipeline *master = kcrtc->master; u32 new_mode; @@ -180,8 +182,8 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc) err = mdev->funcs->change_opmode(mdev, new_mode); if (err) { - DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,", - mdev->dpmode, new_mode); + drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,", + mdev->dpmode, new_mode); goto unlock; } @@ -200,6 +202,7 @@ unlock: void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, struct komeda_events *evts) { + struct drm_device *drm = kcrtc->base.dev; struct drm_crtc *crtc = &kcrtc->base; u32 events = evts->pipes[kcrtc->master->id]; @@ -212,7 +215,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, if (wb_conn) drm_writeback_signal_completion(&wb_conn->base, 0); else - DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n", + drm_warn(drm, 
"CRTC[%d]: EOW happen but no wb_connector.\n", drm_crtc_index(&kcrtc->base)); } /* will handle it together with the write back support */ @@ -236,7 +239,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, crtc->state->event = NULL; drm_crtc_send_vblank_event(crtc, event); } else { - DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n", + drm_warn(drm, "CRTC[%d]: FLIP happened but no pending commit.\n", drm_crtc_index(&kcrtc->base)); } spin_unlock_irqrestore(&crtc->dev->event_lock, flags); @@ -309,7 +312,7 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, /* wait the flip take affect.*/ if (wait_for_completion_timeout(flip_done, HZ) == 0) { - DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id); + drm_err(drm, "wait pipe%d flip done timeout\n", kcrtc->master->id); if (!input_flip_done) { unsigned long flags; @@ -562,6 +565,7 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = { int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev) { + struct drm_device *drm = &kms->base; struct komeda_crtc *crtc; struct komeda_pipeline *master; char str[16]; @@ -581,7 +585,7 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, else sprintf(str, "None"); - DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n", + drm_info(drm, "CRTC-%d: master(pipe-%d) slave(%s).\n", kms->n_crtcs, master->id, str); kms->n_crtcs++; @@ -613,6 +617,7 @@ static int komeda_attach_bridge(struct device *dev, struct komeda_pipeline *pipe, struct drm_encoder *encoder) { + struct drm_device *drm = encoder->dev; struct drm_bridge *bridge; int err; @@ -624,7 +629,7 @@ static int komeda_attach_bridge(struct device *dev, err = drm_bridge_attach(encoder, bridge, NULL, 0); if (err) - dev_err(dev, "bridge_attach() failed for pipe: %s\n", + drm_err(drm, "bridge_attach() failed for pipe: %s\n", of_node_full_name(pipe->of_node)); return err; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c index 901f938aefe0..3ca461eb0a24 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -9,6 +9,7 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include "komeda_framebuffer.h" #include "komeda_dev.h" diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 806da0aaedf7..4b4a08cb396d 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -22,6 +22,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index c3179d74f3f5..81d45f2dd6a7 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -33,6 +33,7 @@ #include <drm/drm_modeset_helper.h> #include <drm/drm_module.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index bc5f5e9798c3..b765f6c9eea4 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -29,6 +29,7 @@ #include <drm/drm_modeset_helper.h> #include <drm/drm_module.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> 
#include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 600af5ad81b1..47733c85d271 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -14,6 +14,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_writeback.h> diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 87f2e5ee8790..f1a5014bcfa1 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -263,7 +263,7 @@ static int malidp_se_check_scaling(struct malidp_plane *mp, struct drm_plane_state *state) { struct drm_crtc_state *crtc_state = - drm_atomic_get_existing_crtc_state(state->state, state->crtc); + drm_atomic_get_new_crtc_state(state->state, state->crtc); struct malidp_crtc_state *mc; u32 src_w, src_h; int ret; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 0900e4466ffb..033b19b31f63 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -13,6 +13,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c index a763349dd89f..2445365c823f 100644 --- a/drivers/gpu/drm/armada/armada_debugfs.c +++ b/drivers/gpu/drm/armada/armada_debugfs.c @@ -12,6 +12,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include "armada_crtc.h" #include "armada_drm.h" diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index aa4289127086..77098928f821 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -6,6 +6,7 @@ #include <drm/drm_modeset_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include "armada_drm.h" #include "armada_fb.h" diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index cb53cc91bafb..8bbae94804f8 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -13,6 +13,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include "armada_crtc.h" #include "armada_drm.h" @@ -28,8 +29,6 @@ static void armada_fbdev_fb_destroy(struct fb_info *info) fbh->fb->funcs->destroy(fbh->fb); drm_client_release(&fbh->client); - drm_fb_helper_unprepare(fbh); - kfree(fbh); } static const struct fb_ops armada_fb_ops = { @@ -45,10 +44,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = fbh->dev; + struct fb_info *info = fbh->info; struct drm_mode_fb_cmd2 mode; struct armada_framebuffer *dfb; struct armada_gem_object *obj; - struct fb_info *info; int size, ret; void *ptr; @@ -92,12 +91,6 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, if (IS_ERR(dfb)) return PTR_ERR(dfb); - info = drm_fb_helper_alloc_info(fbh); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_fballoc; - } - info->fbops = &armada_fb_ops; info->fix.smem_start = obj->phys_addr; info->fix.smem_len = obj->obj.size; @@ -113,8 +106,4 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, (unsigned long 
long)obj->phys_addr); return 0; - - err_fballoc: - dfb->fb.funcs->destroy(&dfb->fb); - return ret; } diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 1a1680d71486..35fcfa0d85ff 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -10,6 +10,7 @@ #include <drm/armada_drm.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include "armada_drm.h" #include "armada_gem.h" diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 3b9bd8ecda13..21fd3b4ba10f 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -12,6 +12,7 @@ #include <drm/drm_atomic_uapi.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include "armada_crtc.h" #include "armada_drm.h" diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c index cc47c032dbc1..a0326b4f568e 100644 --- a/drivers/gpu/drm/armada/armada_plane.c +++ b/drivers/gpu/drm/armada/armada_plane.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include "armada_crtc.h" #include "armada_drm.h" @@ -94,12 +95,7 @@ int armada_drm_plane_atomic_check(struct drm_plane *plane, return 0; } - if (state) - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); - else - crtc_state = crtc->state; - + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 0, INT_MAX, true, false); diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index 2547613155da..cdbcba3b43ad 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -6,7 +6,9 @@ ast-y := \ ast_2000.o \ ast_2100.o \ + ast_2200.o \ ast_2300.o \ + ast_2400.o \ ast_2500.o \ ast_2600.o \ ast_cursor.o \ @@ -14,7 +16,6 @@ ast-y := \ ast_dp501.o \ ast_dp.o \ ast_drv.o \ - ast_main.o \ ast_mm.o \ ast_mode.o \ ast_post.o \ diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c index 41c2aa1e425a..fa3bc23ce098 100644 --- a/drivers/gpu/drm/ast/ast_2000.c +++ b/drivers/gpu/drm/ast/ast_2000.c @@ -27,6 +27,9 @@ */ #include <linux/delay.h> +#include <linux/pci.h> + +#include <drm/drm_drv.h> #include "ast_drv.h" #include "ast_post.h" @@ -147,3 +150,108 @@ int ast_2000_post(struct ast_device *ast) return 0; } + +/* + * Mode setting + */ + +const struct ast_vbios_dclk_info ast_2000_dclk_table[] = { + {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xee, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xc6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7b, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */ + {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */ + {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0d: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */ + {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x77, 0x58, 0x80}, /* 17: 
VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x3b, 0x2c, 0x81}, /* 1a: VCLK118_25 */ +}; + +/* + * Device initialization + */ + +void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post) +{ + enum ast_tx_chip tx_chip = AST_TX_NONE; + u8 vgacra3; + + /* + * VGACRA3 Enhanced Color Mode Register, check if DVO is already + * enabled, in that case, assume we have a SIL164 TMDS transmitter + * + * Don't make that assumption if we the chip wasn't enabled and + * is at power-on reset, otherwise we'll incorrectly "detect" a + * SIL164 when there is none. + */ + if (!need_post) { + vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); + if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED) + tx_chip = AST_TX_SIL164; + } + + __ast_device_set_tx_chip(ast, tx_chip); +} + +static const struct ast_device_quirks ast_2000_device_quirks = { + .crtc_mem_req_threshold_low = 31, + .crtc_mem_req_threshold_high = 47, +}; + +struct drm_device *ast_2000_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2000_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c index 829e3b8b0d19..05aeb0624d41 100644 --- a/drivers/gpu/drm/ast/ast_2100.c +++ b/drivers/gpu/drm/ast/ast_2100.c @@ -27,6 +27,9 @@ */ #include <linux/delay.h> +#include <linux/pci.h> + +#include <drm/drm_drv.h> #include "ast_drv.h" #include "ast_post.h" @@ -386,3 +389,92 @@ int ast_2100_post(struct ast_device *ast) return 0; } + +/* + * Widescreen detection + */ + +/* Try to detect WSXGA+ on Gen2+ */ +bool __ast_2100_detect_wsxga_p(struct ast_device *ast) +{ + u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0); + + if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)) + return true; + if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN) + return true; + + return false; +} + +/* Try to detect WUXGA on Gen2+ */ +bool __ast_2100_detect_wuxga(struct ast_device *ast) +{ + u8 vgacrd1; + + if (ast->support_fullhd) { + vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); + if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA)) + return true; + } + + return false; +} + +static void ast_2100_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast)) { + ast->support_wsxga_p = true; + if (ast->chip == AST2100) + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2100_device_quirks = { + .crtc_mem_req_threshold_low = 47, + .crtc_mem_req_threshold_high = 63, +}; + +struct drm_device *ast_2100_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + 
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2100_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2100_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2200.c b/drivers/gpu/drm/ast/ast_2200.c new file mode 100644 index 000000000000..b64345d11ffa --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2200.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ */ +/* + * Authors: Dave Airlie <airlied@redhat.com> + */ + +#include <linux/pci.h> + +#include <drm/drm_drv.h> + +#include "ast_drv.h" + +static void ast_2200_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast)) { + ast->support_wsxga_p = true; + if (ast->chip == AST2200) + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2200_device_quirks = { + .crtc_mem_req_threshold_low = 47, + .crtc_mem_req_threshold_high = 63, +}; + +struct drm_device *ast_2200_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2200_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2200_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} + diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c index dc2a32244689..5f50d9f91ffd 100644 --- a/drivers/gpu/drm/ast/ast_2300.c +++ b/drivers/gpu/drm/ast/ast_2300.c @@ -27,6 +27,12 @@ */ #include <linux/delay.h> +#include <linux/pci.h> +#include <linux/sizes.h> + +#include <drm/drm_drv.h> +#include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "ast_drv.h" #include "ast_post.h" @@ -1326,3 +1332,132 @@ int ast_2300_post(struct ast_device *ast) return 0; } + +/* + * Device initialization + */ + +void ast_2300_detect_tx_chip(struct ast_device *ast) +{ + enum ast_tx_chip tx_chip = AST_TX_NONE; + struct drm_device *dev = &ast->base; + u8 vgacrd1; + + /* + * On AST GEN4+, look at the configuration set by the SoC in + * the SOC scratch register #1 bits 11:8 (interestingly marked + * as "reserved" in the spec) + */ + vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, + AST_IO_VGACRD1_TX_TYPE_MASK); + switch (vgacrd1) { + /* + * GEN4 to GEN6 + */ + case AST_IO_VGACRD1_TX_SIL164_VBIOS: + tx_chip = AST_TX_SIL164; + break; + case AST_IO_VGACRD1_TX_DP501_VBIOS: + ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL); + if (ast->dp501_fw_addr) { + /* backup firmware */ + if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) { + drmm_kfree(dev, ast->dp501_fw_addr); + ast->dp501_fw_addr = NULL; + } + } + fallthrough; + case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: + tx_chip = AST_TX_DP501; + break; + /* + * GEN7+ + */ + case AST_IO_VGACRD1_TX_ASTDP: + tx_chip = AST_TX_ASTDP; + break; + /* + * Several of the listed TX chips are not explicitly supported + * by the ast driver. If these exist in real-world devices, they + * are most likely reported as VGA or SIL164 outputs. We warn here + * to get bug reports for these devices. If none come in for some + * time, we can begin to fail device probing on these values. 
+ */ + case AST_IO_VGACRD1_TX_ITE66121_VBIOS: + drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + case AST_IO_VGACRD1_TX_CH7003_VBIOS: + drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + case AST_IO_VGACRD1_TX_ANX9807_VBIOS: + drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + } + + __ast_device_set_tx_chip(ast, tx_chip); +} + +static void ast_2300_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1300) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2300_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, +}; + +struct drm_device *ast_2300_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2300_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2300_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2400.c b/drivers/gpu/drm/ast/ast_2400.c new file mode 100644 index 000000000000..2e6befd24f91 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2400.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie <airlied@redhat.com> + */ + +#include <linux/pci.h> + +#include <drm/drm_drv.h> +#include <drm/drm_print.h> + +#include "ast_drv.h" + +static void ast_2400_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1400) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2400_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, +}; + +struct drm_device *ast_2400_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2400_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2400_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c index 1e541498ea67..2a52af0ded56 100644 --- a/drivers/gpu/drm/ast/ast_2500.c +++ b/drivers/gpu/drm/ast/ast_2500.c @@ -27,7 +27,9 @@ */ #include <linux/delay.h> +#include <linux/pci.h> +#include <drm/drm_drv.h> #include <drm/drm_print.h> #include "ast_drv.h" @@ -567,3 +569,107 @@ int ast_2500_post(struct ast_device *ast) return 0; } + +/* + * Mode setting + */ + +const struct ast_vbios_dclk_info ast_2500_dclk_table[] = { + {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xee, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xc6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7b, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */ + {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */ + {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0d: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */ + {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x58, 0x01, 0x42}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x44, 0x20, 0x43}, /* 1a: VCLK118_25 */ +}; + +/* + * Device initialization + */ + +static void ast_2500_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if 
(__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2500_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, + .crtc_hsync_precatch_needed = true, +}; + +struct drm_device *ast_2500_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2500_device_quirks); + + ast->dclk_table = ast_2500_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2500_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c index 8d75a47444f5..dee78fd5b022 100644 --- a/drivers/gpu/drm/ast/ast_2600.c +++ b/drivers/gpu/drm/ast/ast_2600.c @@ -26,6 +26,10 @@ * Authors: Dave Airlie <airlied@redhat.com> */ +#include <linux/pci.h> + +#include <drm/drm_drv.h> + #include "ast_drv.h" #include "ast_post.h" @@ -42,3 +46,71 @@ int ast_2600_post(struct ast_device *ast) return 0; } + +/* + * Device initialization + */ + +static void ast_2600_detect_widescreen(struct ast_device *ast) +{ + ast->support_wsxga_p = true; + ast->support_fullhd = true; + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2600_device_quirks = { + .crtc_mem_req_threshold_low = 160, + .crtc_mem_req_threshold_high = 224, + .crtc_hsync_precatch_needed = true, + .crtc_hsync_add4_needed = true, +}; + +struct drm_device *ast_2600_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2600_device_quirks); + + ast->dclk_table = ast_2500_dclk_table; + + ast_2300_detect_tx_chip(ast); + + switch (ast->tx_chip) { + case AST_TX_ASTDP: + ret = ast_post_gpu(ast); + break; + default: + ret = 0; + if (need_post) + ret = ast_post_gpu(ast); + break; + } + if (ret) + return ERR_PTR(ret); + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2600_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 473faa92d08c..b9a9b050b546 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -37,6 +37,7 @@ #include <drm/drm_fbdev_shmem.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_module.h> +#include <drm/drm_print.h> 
#include <drm/drm_probe_helper.h> #include "ast_drv.h" @@ -46,6 +47,34 @@ static int ast_modeset = -1; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, ast_modeset, int, 0400); +void ast_device_init(struct ast_device *ast, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + const struct ast_device_quirks *quirks) +{ + ast->quirks = quirks; + ast->chip = chip; + ast->config_mode = config_mode; + ast->regs = regs; + ast->ioregs = ioregs; +} + +void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip) +{ + static const char * const info_str[] = { + "analog VGA", + "Sil164 TMDS transmitter", + "DP501 DisplayPort transmitter", + "ASPEED DisplayPort transmitter", + }; + + drm_info(&ast->base, "Using %s\n", info_str[tx_chip]); + + ast->tx_chip = tx_chip; +} + /* * DRM driver */ @@ -266,7 +295,7 @@ static int ast_detect_chip(struct pci_dev *pdev, *chip_out = chip; *config_mode_out = config_mode; - return 0; + return __AST_CHIP_GEN(chip); } static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -277,6 +306,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *ioregs; enum ast_config_mode config_mode; enum ast_chip chip; + unsigned int chip_gen; struct drm_device *drm; bool need_post = false; @@ -349,10 +379,43 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); - if (ret) + if (ret < 0) return ret; + chip_gen = ret; - drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post); + switch (chip_gen) { + case 1: + drm = ast_2000_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 2: + drm = ast_2100_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 3: + drm = ast_2200_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 4: + drm = ast_2300_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 5: + drm = ast_2400_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 6: + drm = ast_2500_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 7: + drm = ast_2600_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + default: + dev_err(&pdev->dev, "Gen%d not supported\n", chip_gen); + return -ENODEV; + } if (IS_ERR(drm)) return PTR_ERR(drm); pci_set_drvdata(pdev, drm); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index d41bd876167c..787e38c6c17d 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -164,9 +164,31 @@ to_ast_connector(struct drm_connector *connector) * Device */ +struct ast_device_quirks { + /* + * CRTC memory request threshold + */ + unsigned char crtc_mem_req_threshold_low; + unsigned char crtc_mem_req_threshold_high; + + /* + * Adjust hsync values to load next scanline early. Signalled + * by AST2500PreCatchCRT in VBIOS mode flags. + */ + bool crtc_hsync_precatch_needed; + + /* + * Workaround for modes with HSync Time that is not a multiple + * of 8 (e.g., 1920x1080@60Hz, HSync +44 pixels). 
+ */ + bool crtc_hsync_add4_needed; +}; + struct ast_device { struct drm_device base; + const struct ast_device_quirks *quirks; + void __iomem *regs; void __iomem *ioregs; void __iomem *dp501_fw_buf; @@ -174,6 +196,8 @@ struct ast_device { enum ast_config_mode config_mode; enum ast_chip chip; + const struct ast_vbios_dclk_info *dclk_table; + void __iomem *vram; unsigned long vram_base; unsigned long vram_size; @@ -217,14 +241,6 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev) return container_of(dev, struct ast_device, base); } -struct drm_device *ast_device_create(struct pci_dev *pdev, - const struct drm_driver *drv, - enum ast_chip chip, - enum ast_config_mode config_mode, - void __iomem *regs, - void __iomem *ioregs, - bool need_post); - static inline unsigned long __ast_gen(struct ast_device *ast) { return __AST_CHIP_GEN(ast->chip); @@ -415,21 +431,89 @@ struct ast_crtc_state { int ast_mm_init(struct ast_device *ast); +/* ast_drv.c */ +void ast_device_init(struct ast_device *ast, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + const struct ast_device_quirks *quirks); +void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip); + /* ast_2000.c */ int ast_2000_post(struct ast_device *ast); +extern const struct ast_vbios_dclk_info ast_2000_dclk_table[]; +void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post); +struct drm_device *ast_2000_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2100.c */ int ast_2100_post(struct ast_device *ast); +bool __ast_2100_detect_wsxga_p(struct ast_device *ast); +bool __ast_2100_detect_wuxga(struct ast_device *ast); +struct drm_device *ast_2100_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); + +/* ast_2200.c */ +struct drm_device *ast_2200_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2300.c */ int ast_2300_post(struct ast_device *ast); +void ast_2300_detect_tx_chip(struct ast_device *ast); +struct drm_device *ast_2300_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); + +/* ast_2400.c */ +struct drm_device *ast_2400_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2500.c */ void ast_2500_patch_ahb(void __iomem *regs); int ast_2500_post(struct ast_device *ast); +extern const struct ast_vbios_dclk_info ast_2500_dclk_table[]; +struct drm_device *ast_2500_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2600.c */ int ast_2600_post(struct ast_device *ast); +struct drm_device *ast_2600_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast 
post */ int ast_post_gpu(struct ast_device *ast); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c deleted file mode 100644 index 3eea6a6cdacd..000000000000 --- a/drivers/gpu/drm/ast/ast_main.c +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - */ -/* - * Authors: Dave Airlie <airlied@redhat.com> - */ - -#include <linux/of.h> -#include <linux/pci.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_drv.h> -#include <drm/drm_gem.h> -#include <drm/drm_managed.h> - -#include "ast_drv.h" - -/* Try to detect WSXGA+ on Gen2+ */ -static bool __ast_2100_detect_wsxga_p(struct ast_device *ast) -{ - u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0); - - if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)) - return true; - if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN) - return true; - - return false; -} - -/* Try to detect WUXGA on Gen2+ */ -static bool __ast_2100_detect_wuxga(struct ast_device *ast) -{ - u8 vgacrd1; - - if (ast->support_fullhd) { - vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); - if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA)) - return true; - } - - return false; -} - -static void ast_detect_widescreen(struct ast_device *ast) -{ - ast->support_wsxga_p = false; - ast->support_fullhd = false; - ast->support_wuxga = false; - - if (AST_GEN(ast) >= 7) { - ast->support_wsxga_p = true; - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 6) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST2510) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 5) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST1400) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 4) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST1300) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 3) { - if 
(__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) { - if (ast->chip == AST2200) - ast->support_fullhd = true; - } - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 2) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) { - if (ast->chip == AST2100) - ast->support_fullhd = true; - } - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } -} - -static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) -{ - static const char * const info_str[] = { - "analog VGA", - "Sil164 TMDS transmitter", - "DP501 DisplayPort transmitter", - "ASPEED DisplayPort transmitter", - }; - - struct drm_device *dev = &ast->base; - u8 vgacra3, vgacrd1; - - /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip = AST_TX_NONE; - - if (AST_GEN(ast) <= 3) { - /* - * VGACRA3 Enhanced Color Mode Register, check if DVO is already - * enabled, in that case, assume we have a SIL164 TMDS transmitter - * - * Don't make that assumption if we the chip wasn't enabled and - * is at power-on reset, otherwise we'll incorrectly "detect" a - * SIL164 when there is none. - */ - if (!need_post) { - vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); - if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED) - ast->tx_chip = AST_TX_SIL164; - } - } else { - /* - * On AST GEN4+, look at the configuration set by the SoC in - * the SOC scratch register #1 bits 11:8 (interestingly marked - * as "reserved" in the spec) - */ - vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, - AST_IO_VGACRD1_TX_TYPE_MASK); - switch (vgacrd1) { - /* - * GEN4 to GEN6 - */ - case AST_IO_VGACRD1_TX_SIL164_VBIOS: - ast->tx_chip = AST_TX_SIL164; - break; - case AST_IO_VGACRD1_TX_DP501_VBIOS: - ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); - if (ast->dp501_fw_addr) { - /* backup firmware */ - if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) { - drmm_kfree(dev, ast->dp501_fw_addr); - ast->dp501_fw_addr = NULL; - } - } - fallthrough; - case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: - ast->tx_chip = AST_TX_DP501; - break; - /* - * GEN7+ - */ - case AST_IO_VGACRD1_TX_ASTDP: - ast->tx_chip = AST_TX_ASTDP; - break; - /* - * Several of the listed TX chips are not explicitly supported - * by the ast driver. If these exist in real-world devices, they - * are most likely reported as VGA or SIL164 outputs. We warn here - * to get bug reports for these devices. If none come in for some - * time, we can begin to fail device probing on these values. 
- */ - case AST_IO_VGACRD1_TX_ITE66121_VBIOS: - drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - case AST_IO_VGACRD1_TX_CH7003_VBIOS: - drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - case AST_IO_VGACRD1_TX_ANX9807_VBIOS: - drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - } - } - - drm_info(dev, "Using %s\n", info_str[ast->tx_chip]); -} - -struct drm_device *ast_device_create(struct pci_dev *pdev, - const struct drm_driver *drv, - enum ast_chip chip, - enum ast_config_mode config_mode, - void __iomem *regs, - void __iomem *ioregs, - bool need_post) -{ - struct drm_device *dev; - struct ast_device *ast; - int ret; - - ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); - if (IS_ERR(ast)) - return ERR_CAST(ast); - dev = &ast->base; - - ast->chip = chip; - ast->config_mode = config_mode; - ast->regs = regs; - ast->ioregs = ioregs; - - ast_detect_tx_chip(ast, need_post); - switch (ast->tx_chip) { - case AST_TX_ASTDP: - ret = ast_post_gpu(ast); - break; - default: - ret = 0; - if (need_post) - ret = ast_post_gpu(ast); - break; - } - if (ret) - return ERR_PTR(ret); - - ret = ast_mm_init(ast); - if (ret) - return ERR_PTR(ret); - - /* map reserved buffer */ - ast->dp501_fw_buf = NULL; - if (ast->vram_size < pci_resource_len(pdev, 0)) { - ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); - if (!ast->dp501_fw_buf) - drm_info(dev, "failed to map reserved buffer!\n"); - } - - ast_detect_widescreen(ast); - - ret = ast_mode_config_init(ast); - if (ret) - return ERR_PTR(ret); - - return dev; -} diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 30b011ed0a05..cd08990a10f9 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -43,6 +43,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_panic.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "ast_drv.h" @@ -241,16 +242,15 @@ static void ast_set_std_reg(struct ast_device *ast, ast_set_index_reg(ast, AST_IO_VGAGRI, i, stdtable->gr[i]); } -static void ast_set_crtc_reg(struct ast_device *ast, - struct drm_display_mode *mode, +static void ast_set_crtc_reg(struct ast_device *ast, struct drm_display_mode *mode, const struct ast_vbios_enhtable *vmode) { u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; - u16 temp, precache = 0; + u16 temp; + unsigned char crtc_hsync_precatch = 0; - if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) && - (vmode->flags & AST2500PreCatchCRT)) - precache = 40; + if (ast->quirks->crtc_hsync_precatch_needed && (vmode->flags & AST2500PreCatchCRT)) + crtc_hsync_precatch = 40; ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00); @@ -276,12 +276,12 @@ static void ast_set_crtc_reg(struct ast_device *ast, jregAD |= 0x01; /* HBE D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x03, 0xE0, (temp & 0x1f)); - temp = ((mode->crtc_hsync_start-precache) >> 3) - 1; + temp = ((mode->crtc_hsync_start - crtc_hsync_precatch) >> 3) - 1; if (temp & 0x100) jregAC |= 0x40; /* HRS D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x04, 0x00, temp); - temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f; + temp = (((mode->crtc_hsync_end - crtc_hsync_precatch) >> 3) - 1) & 0x3f; if (temp & 0x20) jregAD |= 0x04; /* HRE D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05)); @@ -289,8 +289,7 @@ static void 
ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAC, 0x00, jregAC); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAD, 0x00, jregAD); - // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels); - if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080)) + if (ast->quirks->crtc_hsync_add4_needed && mode->crtc_vdisplay == 1080) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x02); else ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x00); @@ -348,7 +347,7 @@ static void ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x09, 0xdf, jreg09); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAE, 0x00, (jregAE | 0x80)); - if (precache) + if (crtc_hsync_precatch) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x80); else ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x00); @@ -370,12 +369,7 @@ static void ast_set_dclk_reg(struct ast_device *ast, struct drm_display_mode *mode, const struct ast_vbios_enhtable *vmode) { - const struct ast_vbios_dclk_info *clk_info; - - if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) - clk_info = &dclk_table_ast2500[vmode->dclk_index]; - else - clk_info = &dclk_table[vmode->dclk_index]; + const struct ast_vbios_dclk_info *clk_info = &ast->dclk_table[vmode->dclk_index]; ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2); @@ -415,20 +409,11 @@ static void ast_set_color_reg(struct ast_device *ast, static void ast_set_crtthd_reg(struct ast_device *ast) { - /* Set Threshold */ - if (IS_AST_GEN7(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0xe0); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0xa0); - } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x78); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x60); - } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x3f); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x2f); - } else { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x2f); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x1f); - } + u8 vgacra6 = ast->quirks->crtc_mem_req_threshold_low; + u8 vgacra7 = ast->quirks->crtc_mem_req_threshold_high; + + ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, vgacra7); + ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, vgacra6); } static void ast_set_sync_reg(struct ast_device *ast, @@ -572,9 +557,14 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane, ast_set_vbios_color_reg(ast, fb->format, ast_crtc_state->vmode); } - drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); - drm_atomic_for_each_plane_damage(&iter, &damage) { - ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage); + /* if the buffer comes from another device */ + if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE) == 0) { + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage); + } + + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); } /* diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index f1c9f7e1f1fc..7da5b5c60f41 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -33,66 +33,6 @@ #define HiCModeIndex 3 #define TrueCModeIndex 4 -static const struct ast_vbios_dclk_info dclk_table[] = 
{ - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ - {0x77, 0x58, 0x80}, /* 17: VCLK119 */ - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ - {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ -}; - -static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ - {0x58, 0x01, 0x42}, /* 17: VCLK119 */ - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ - {0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */ -}; - static const struct ast_vbios_stdtable vbios_stdtable[] = { /* MD_2_3_400 */ { diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 0f7ffb3ced20..e0efc7309b1b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -20,6 +20,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -215,32 +216,32 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c, if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_XLCDC_CM), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_SD); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_XLCDC_SD, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n"); } regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, 
status, !(status & ATMEL_HLCDC_DISP), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_SYNC), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_PIXEL_CLK), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n"); clk_disable_unprepare(crtc->dc->hlcdc->sys_clk); pinctrl_pm_select_sleep_state(dev->dev); @@ -269,32 +270,32 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c, if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_PIXEL_CLK, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_SYNC, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_DISP, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n"); if (crtc->dc->desc->is_xlcdc) { regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_CM); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_XLCDC_CM, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_SD); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_XLCDC_SD), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n"); } pm_runtime_put_sync(dev->dev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index fa8ad94e431a..dd70894c8f38 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -25,6 +25,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -724,19 +725,19 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev) ret = atmel_hlcdc_create_outputs(dev); if (ret) { - dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret); + drm_err(dev, "failed to create HLCDC outputs: %d\n", ret); return ret; } ret = atmel_hlcdc_create_planes(dev); if (ret) { - dev_err(dev->dev, "failed to create planes: %d\n", ret); + drm_err(dev, "failed to create planes: %d\n", ret); return ret; } ret = atmel_hlcdc_crtc_create(dev); if (ret) { - dev_err(dev->dev, "failed to create crtc\n"); + drm_err(dev, "failed to create crtc\n"); return ret; } @@ -778,7 +779,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = 
clk_prepare_enable(dc->hlcdc->periph_clk); if (ret) { - dev_err(dev->dev, "failed to enable periph_clk\n"); + drm_err(dev, "failed to enable periph_clk\n"); return ret; } @@ -786,13 +787,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = drm_vblank_init(dev, 1); if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); + drm_err(dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); + drm_err(dev, "failed to initialize mode setting\n"); goto err_periph_clk_disable; } @@ -802,7 +803,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); if (ret < 0) { - dev_err(dev->dev, "failed to install IRQ handler\n"); + drm_err(dev, "failed to install IRQ handler\n"); goto err_periph_clk_disable; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h index e1a0bb24b511..53d47f01db0b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h @@ -378,7 +378,8 @@ struct atmel_lcdc_dc_ops { void (*lcdc_update_buffers)(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state, u32 sr, int i); - void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane); + void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc); void (*lcdc_update_general_settings)(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state); void (*lcdc_atomic_update)(struct atmel_hlcdc_plane *plane, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 50fee6a93964..0b8a86afb096 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -15,6 +15,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_encoder.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "atmel_hlcdc_dc.h" @@ -92,7 +93,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint) output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep); of_node_put(ep); if (output->bus_fmt < 0) { - dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint); + drm_err(dev, "endpoint %d: invalid bus width\n", endpoint); return -EINVAL; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 4a7ba0918eca..92132be9823f 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -16,6 +16,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include "atmel_hlcdc_dc.h" @@ -365,13 +366,34 @@ void atmel_xlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane, xfactor); /* - * With YCbCr 4:2:2 and YCbYcr 4:2:0 window resampling, configuration - * register LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT is half + * With YCbCr 4:2:0 window resampling, configuration register + * LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT values are half * the value of yfactor and xfactor. + * + * On the other hand, with YCbCr 4:2:2 window resampling, only the + * configuration register LCDC_HEOCFG27.HXSCFACT value is half the value + * of the xfactor; the value of LCDC_HEOCFG25.VXSCFACT is yfactor (no + * division by 2). 
*/ - if (state->base.fb->format->format == DRM_FORMAT_YUV420) { + switch (state->base.fb->format->format) { + /* YCbCr 4:2:2 */ + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_NV61: + xfactor /= 2; + break; + + /* YCbCr 4:2:0 */ + case DRM_FORMAT_YUV420: + case DRM_FORMAT_NV21: yfactor /= 2; xfactor /= 2; + break; + default: + break; } atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 2, @@ -714,7 +736,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, if (!hstate->base.crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, s->crtc); mode = &crtc_state->adjusted_mode; ret = drm_atomic_helper_check_plane_state(s, crtc_state, @@ -816,7 +838,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, return 0; } -static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) +static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc) { /* Disable interrupts */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR, @@ -832,7 +855,8 @@ static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR); } -static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) +static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc) { /* Disable interrupts */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IDR, @@ -842,6 +866,15 @@ static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_ENR, 0); + /* + * Updating XLCDC_xxxCFGx, XLCDC_xxxFBA and XLCDC_xxxEN, + * (where xxx indicates each layer) requires writing one to the + * Update Attribute field for each layer in LCDC_ATTRE register for SAM9X7. 
+ */ + regmap_write(dc->hlcdc->regmap, ATMEL_XLCDC_ATTRE, ATMEL_XLCDC_BASE_UPDATE | + ATMEL_XLCDC_OVR1_UPDATE | ATMEL_XLCDC_OVR3_UPDATE | + ATMEL_XLCDC_HEO_UPDATE); + /* Clear all pending interrupts */ atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR); } @@ -852,7 +885,7 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p, struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private; - dc->desc->ops->lcdc_atomic_disable(plane); + dc->desc->ops->lcdc_atomic_disable(plane, dc); } static void atmel_hlcdc_atomic_update(struct atmel_hlcdc_plane *plane, @@ -1034,7 +1067,7 @@ static void atmel_hlcdc_irq_dbg(struct atmel_hlcdc_plane *plane, if (isr & (ATMEL_HLCDC_LAYER_OVR_IRQ(0) | ATMEL_HLCDC_LAYER_OVR_IRQ(1) | ATMEL_HLCDC_LAYER_OVR_IRQ(2))) - dev_dbg(plane->base.dev->dev, "overrun on plane %s\n", + drm_dbg(plane->base.dev, "overrun on plane %s\n", desc->name); } @@ -1051,7 +1084,7 @@ static void atmel_xlcdc_irq_dbg(struct atmel_hlcdc_plane *plane, if (isr & (ATMEL_XLCDC_LAYER_OVR_IRQ(0) | ATMEL_XLCDC_LAYER_OVR_IRQ(1) | ATMEL_XLCDC_LAYER_OVR_IRQ(2))) - dev_dbg(plane->base.dev->dev, "overrun on plane %s\n", + drm_dbg(plane->base.dev, "overrun on plane %s\n", desc->name); } @@ -1140,7 +1173,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p) if (state) { if (atmel_hlcdc_plane_alloc_dscrs(p, state)) { kfree(state); - dev_err(p->dev->dev, + drm_err(p->dev, "Failed to allocate initial plane state\n"); return; } diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig index 9a480c6abb85..b9028a5e5a06 100644 --- a/drivers/gpu/drm/bridge/imx/Kconfig +++ b/drivers/gpu/drm/bridge/imx/Kconfig @@ -18,12 +18,23 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE depends on OF depends on COMMON_CLK select DRM_DW_HDMI + imply DRM_IMX8MP_HDMI_PAI imply DRM_IMX8MP_HDMI_PVI imply PHY_FSL_SAMSUNG_HDMI_PHY help Choose this to enable support for the internal HDMI encoder found on the i.MX8MP SoC. +config DRM_IMX8MP_HDMI_PAI + tristate "Freescale i.MX8MP HDMI PAI bridge support" + depends on OF + select DRM_DW_HDMI + select REGMAP + select REGMAP_MMIO + help + Choose this to enable support for the internal HDMI TX Parallel + Audio Interface found on the Freescale i.MX8MP SoC. 
+ config DRM_IMX8MP_HDMI_PVI tristate "Freescale i.MX8MP HDMI PVI bridge support" depends on OF diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile index dd5d48584806..8d01fda25451 100644 --- a/drivers/gpu/drm/bridge/imx/Makefile +++ b/drivers/gpu/drm/bridge/imx/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o +obj-$(CONFIG_DRM_IMX8MP_HDMI_PAI) += imx8mp-hdmi-pai.o obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c new file mode 100644 index 000000000000..8d13a35b206a --- /dev/null +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2025 NXP + */ + +#include <linux/bitfield.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <drm/bridge/dw_hdmi.h> +#include <sound/asoundef.h> + +#define HTX_PAI_CTRL 0x00 +#define ENABLE BIT(0) + +#define HTX_PAI_CTRL_EXT 0x04 +#define WTMK_HIGH_MASK GENMASK(31, 24) +#define WTMK_LOW_MASK GENMASK(23, 16) +#define NUM_CH_MASK GENMASK(10, 8) +#define WTMK_HIGH(n) FIELD_PREP(WTMK_HIGH_MASK, (n)) +#define WTMK_LOW(n) FIELD_PREP(WTMK_LOW_MASK, (n)) +#define NUM_CH(n) FIELD_PREP(NUM_CH_MASK, (n) - 1) + +#define HTX_PAI_FIELD_CTRL 0x08 +#define PRE_SEL GENMASK(28, 24) +#define D_SEL GENMASK(23, 20) +#define V_SEL GENMASK(19, 15) +#define U_SEL GENMASK(14, 10) +#define C_SEL GENMASK(9, 5) +#define P_SEL GENMASK(4, 0) + +struct imx8mp_hdmi_pai { + struct regmap *regmap; +}; + +static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel, + int width, int rate, int non_pcm, + int iec958) +{ + const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); + struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; + int val; + + /* PAI set control extended */ + val = WTMK_HIGH(3) | WTMK_LOW(3); + val |= NUM_CH(channel); + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL_EXT, val); + + /* IEC60958 format */ + if (iec958) { + val = FIELD_PREP_CONST(P_SEL, + __bf_shf(IEC958_SUBFRAME_PARITY)); + val |= FIELD_PREP_CONST(C_SEL, + __bf_shf(IEC958_SUBFRAME_CHANNEL_STATUS)); + val |= FIELD_PREP_CONST(U_SEL, + __bf_shf(IEC958_SUBFRAME_USER_DATA)); + val |= FIELD_PREP_CONST(V_SEL, + __bf_shf(IEC958_SUBFRAME_VALIDITY)); + val |= FIELD_PREP_CONST(D_SEL, + __bf_shf(IEC958_SUBFRAME_SAMPLE_24_MASK)); + val |= FIELD_PREP_CONST(PRE_SEL, + __bf_shf(IEC958_SUBFRAME_PREAMBLE_MASK)); + } else { + /* + * The allowed PCM widths are 24bit and 32bit, as they are supported + * by aud2htx module. + * for 24bit, D_SEL = 0, select all the bits. + * for 32bit, D_SEL = 8, select 24bit in MSB. 
+ */ + val = FIELD_PREP(D_SEL, width - 24); + } + + regmap_write(hdmi_pai->regmap, HTX_PAI_FIELD_CTRL, val); + + /* PAI start running */ + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, ENABLE); +} + +static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi) +{ + const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); + struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; + + /* Stop PAI */ + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0); +} + +static const struct regmap_config imx8mp_hdmi_pai_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = HTX_PAI_FIELD_CTRL, +}; + +static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dw_hdmi_plat_data *plat_data = data; + struct imx8mp_hdmi_pai *hdmi_pai; + struct resource *res; + void __iomem *base; + + hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL); + if (!hdmi_pai) + return -ENOMEM; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(base)) + return PTR_ERR(base); + + hdmi_pai->regmap = devm_regmap_init_mmio_clk(dev, "apb", base, + &imx8mp_hdmi_pai_regmap_config); + if (IS_ERR(hdmi_pai->regmap)) { + dev_err(dev, "regmap init failed\n"); + return PTR_ERR(hdmi_pai->regmap); + } + + plat_data->enable_audio = imx8mp_hdmi_pai_enable; + plat_data->disable_audio = imx8mp_hdmi_pai_disable; + plat_data->priv_audio = hdmi_pai; + + return 0; +} + +static const struct component_ops imx8mp_hdmi_pai_ops = { + .bind = imx8mp_hdmi_pai_bind, +}; + +static int imx8mp_hdmi_pai_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &imx8mp_hdmi_pai_ops); +} + +static void imx8mp_hdmi_pai_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &imx8mp_hdmi_pai_ops); +} + +static const struct of_device_id imx8mp_hdmi_pai_of_table[] = { + { .compatible = "fsl,imx8mp-hdmi-pai" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pai_of_table); + +static struct platform_driver imx8mp_hdmi_pai_platform_driver = { + .probe = imx8mp_hdmi_pai_probe, + .remove = imx8mp_hdmi_pai_remove, + .driver = { + .name = "imx8mp-hdmi-pai", + .of_match_table = imx8mp_hdmi_pai_of_table, + }, +}; +module_platform_driver(imx8mp_hdmi_pai_platform_driver); + +MODULE_DESCRIPTION("i.MX8MP HDMI PAI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c index 1e7a789ec289..32fd3554e267 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c @@ -5,11 +5,13 @@ */ #include <linux/clk.h> +#include <linux/component.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <drm/bridge/dw_hdmi.h> #include <drm/drm_modes.h> +#include <drm/drm_of.h> struct imx8mp_hdmi { struct dw_hdmi_plat_data plat_data; @@ -79,10 +81,45 @@ static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = { .update_hpd = dw_hdmi_phy_update_hpd, }; +static int imx8mp_dw_hdmi_bind(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); + int ret; + + ret = component_bind_all(dev, &hdmi->plat_data); + if (ret) + return dev_err_probe(dev, ret, "component_bind_all failed!\n"); + + hdmi->dw_hdmi = dw_hdmi_probe(pdev, &hdmi->plat_data); + if (IS_ERR(hdmi->dw_hdmi)) { + component_unbind_all(dev, &hdmi->plat_data); + return 
PTR_ERR(hdmi->dw_hdmi); + } + + return 0; +} + +static void imx8mp_dw_hdmi_unbind(struct device *dev) +{ + struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); + + dw_hdmi_remove(hdmi->dw_hdmi); + + component_unbind_all(dev, &hdmi->plat_data); +} + +static const struct component_master_ops imx8mp_dw_hdmi_ops = { + .bind = imx8mp_dw_hdmi_bind, + .unbind = imx8mp_dw_hdmi_unbind, +}; + static int imx8mp_dw_hdmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_hdmi_plat_data *plat_data; + struct component_match *match = NULL; + struct device_node *remote; struct imx8mp_hdmi *hdmi; hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); @@ -102,20 +139,38 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev) plat_data->priv_data = hdmi; plat_data->phy_force_vendor = true; - hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); - if (IS_ERR(hdmi->dw_hdmi)) - return PTR_ERR(hdmi->dw_hdmi); - platform_set_drvdata(pdev, hdmi); + /* port@2 is for hdmi_pai device */ + remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0); + if (!remote) { + hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); + if (IS_ERR(hdmi->dw_hdmi)) + return PTR_ERR(hdmi->dw_hdmi); + } else { + drm_of_component_match_add(dev, &match, component_compare_of, remote); + + of_node_put(remote); + + return component_master_add_with_match(dev, &imx8mp_dw_hdmi_ops, match); + } + return 0; } static void imx8mp_dw_hdmi_remove(struct platform_device *pdev) { struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev); + struct device_node *remote; - dw_hdmi_remove(hdmi->dw_hdmi); + remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0); + if (remote) { + of_node_put(remote); + + component_master_del(&pdev->dev, &imx8mp_dw_hdmi_ops); + } else { + dw_hdmi_remove(hdmi->dw_hdmi); + } } static int imx8mp_dw_hdmi_pm_suspend(struct device *dev) diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index 5d272916e200..122502968927 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -683,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int imx8qxp_ldb_runtime_suspend(struct device *dev) -{ - return 0; -} - static int imx8qxp_ldb_runtime_resume(struct device *dev) { struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev); @@ -700,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8qxp_ldb_pm_ops = { - RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL) + RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_ldb_dt_ids[] = { diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index aa7b1dcc5d70..0185f61e6e59 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -287,6 +287,7 @@ enum chip_id { ID_IT6610, ID_IT66121, + ID_IT66122, }; struct it66121_chip_info { @@ -312,7 +313,7 @@ struct it66121_ctx { u8 swl; bool auto_cts; } audio; - const struct it66121_chip_info *info; + enum chip_id id; }; static const struct regmap_range_cfg it66121_regmap_banks[] = { @@ -402,7 +403,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_EC1, 0); if (ret) @@ -428,7 +429,7 @@ static int 
it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_EC1, IT66121_AFE_IP_EC1); @@ -449,7 +450,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT6610) { + if (ctx->id == ID_IT6610) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, IT6610_AFE_XP_BYPASS, IT6610_AFE_XP_BYPASS); @@ -599,7 +600,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_RCLK, 0); if (ret) @@ -748,7 +749,7 @@ static int it66121_bridge_check(struct drm_bridge *bridge, { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); - if (ctx->info->id == ID_IT6610) { + if (ctx->id == ID_IT6610) { /* The IT6610 only supports these settings */ bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; @@ -802,7 +803,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge, if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI)) goto unlock; - if (ctx->info->id == ID_IT66121 && + if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) && regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, IT66121_CLK_BANK_PWROFF_TXCLK)) { @@ -815,7 +816,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge, if (it66121_configure_afe(ctx, adjusted_mode)) goto unlock; - if (ctx->info->id == ID_IT66121 && + if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) && regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, 0)) { goto unlock; @@ -1384,8 +1385,6 @@ static int it66121_audio_startup(struct device *dev, void *data) int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mutex_lock(&ctx->lock); ret = it661221_audio_output_enable(ctx, true); if (ret) @@ -1401,8 +1400,6 @@ static void it66121_audio_shutdown(struct device *dev, void *data) int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mutex_lock(&ctx->lock); ret = it661221_audio_output_enable(ctx, false); if (ret) @@ -1479,8 +1476,6 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev) .no_capture_mute = 1, }; - dev_dbg(dev, "%s\n", __func__); - if (!of_property_present(dev->of_node, "#sound-dai-cells")) { dev_info(dev, "No \"#sound-dai-cells\", no audio\n"); return 0; @@ -1504,13 +1499,20 @@ static const char * const it66121_supplies[] = { "vcn33", "vcn18", "vrf12" }; +static const struct it66121_chip_info it66xx_chip_info[] = { + {.id = ID_IT6610, .vid = 0xca00, .pid = 0x0611 }, + {.id = ID_IT66121, .vid = 0x4954, .pid = 0x0612 }, + {.id = ID_IT66122, .vid = 0x4954, .pid = 0x0622 }, +}; + static int it66121_probe(struct i2c_client *client) { u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 }; struct device_node *ep; - int ret; + int ret, i; struct it66121_ctx *ctx; struct device *dev = &client->dev; + const struct it66121_chip_info *chip_info; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(dev, "I2C check functionality failed.\n"); @@ -1528,7 +1530,6 @@ static int it66121_probe(struct i2c_client *client) ctx->dev = dev; ctx->client = client; - ctx->info = 
i2c_get_match_data(client); of_property_read_u32(ep, "bus-width", &ctx->bus_width); of_node_put(ep); @@ -1574,11 +1575,18 @@ static int it66121_probe(struct i2c_client *client) revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]); device_ids[1] &= IT66121_DEVICE_ID1_MASK; - if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid || - (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) { - return -ENODEV; + for (i = 0; i < ARRAY_SIZE(it66xx_chip_info); i++) { + chip_info = &it66xx_chip_info[i]; + if ((vendor_ids[1] << 8 | vendor_ids[0]) == chip_info->vid && + (device_ids[1] << 8 | device_ids[0]) == chip_info->pid) { + ctx->id = chip_info->id; + break; + } } + if (i == ARRAY_SIZE(it66xx_chip_info)) + return -ENODEV; + ctx->bridge.of_node = dev->of_node; ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA; ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; @@ -1612,28 +1620,18 @@ static void it66121_remove(struct i2c_client *client) mutex_destroy(&ctx->lock); } -static const struct it66121_chip_info it66121_chip_info = { - .id = ID_IT66121, - .vid = 0x4954, - .pid = 0x0612, -}; - -static const struct it66121_chip_info it6610_chip_info = { - .id = ID_IT6610, - .vid = 0xca00, - .pid = 0x0611, -}; - static const struct of_device_id it66121_dt_match[] = { - { .compatible = "ite,it66121", &it66121_chip_info }, - { .compatible = "ite,it6610", &it6610_chip_info }, + { .compatible = "ite,it6610" }, + { .compatible = "ite,it66121" }, + { .compatible = "ite,it66122" }, { } }; MODULE_DEVICE_TABLE(of, it66121_dt_match); static const struct i2c_device_id it66121_id[] = { - { "it66121", (kernel_ulong_t) &it66121_chip_info }, - { "it6610", (kernel_ulong_t) &it6610_chip_info }, + { .name = "it6610" }, + { .name = "it66121" }, + { .name = "it66122" }, { } }; MODULE_DEVICE_TABLE(i2c, it66121_id); diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index d537b1d036fb..1f0aba28ad1e 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -179,7 +179,6 @@ struct sii902x { struct drm_connector connector; struct gpio_desc *reset_gpio; struct i2c_mux_core *i2cmux; - bool sink_is_hdmi; u32 bus_width; /* @@ -315,8 +314,6 @@ static int sii902x_get_modes(struct drm_connector *connector) drm_edid_free(drm_edid); } - sii902x->sink_is_hdmi = connector->display_info.is_hdmi; - return num; } @@ -342,9 +339,17 @@ static void sii902x_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct sii902x *sii902x = bridge_to_sii902x(bridge); + struct drm_connector *connector; + u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (connector && connector->display_info.is_hdmi) + output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI; mutex_lock(&sii902x->mutex); + regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_OUTPUT_MODE, output_mode); regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL, SII902X_AVI_POWER_STATE_MSK, SII902X_AVI_POWER_STATE_D(0)); @@ -359,16 +364,12 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *adj) { struct sii902x *sii902x = bridge_to_sii902x(bridge); - u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI; struct regmap *regmap = sii902x->regmap; u8 buf[HDMI_INFOFRAME_SIZE(AVI)]; struct hdmi_avi_infoframe frame; u16 pixel_clock_10kHz = adj->clock / 10; int ret; - if (sii902x->sink_is_hdmi) - output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI; - buf[0] = 
pixel_clock_10kHz & 0xff; buf[1] = pixel_clock_10kHz >> 8; buf[2] = drm_mode_vrefresh(adj); @@ -384,11 +385,6 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge, mutex_lock(&sii902x->mutex); - ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, - SII902X_SYS_CTRL_OUTPUT_MODE, output_mode); - if (ret) - goto out; - ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10); if (ret) goto out; diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c index e4d0bc2200f8..2cd1847ba776 100644 --- a/drivers/gpu/drm/bridge/simple-bridge.c +++ b/drivers/gpu/drm/bridge/simple-bridge.c @@ -262,6 +262,16 @@ static const struct of_device_id simple_bridge_match[] = { .connector_type = DRM_MODE_CONNECTOR_VGA, }, }, { + .compatible = "asl-tek,cs5263", + .data = &(const struct simple_bridge_info) { + .connector_type = DRM_MODE_CONNECTOR_HDMIA, + }, + }, { + .compatible = "parade,ps185hdm", + .data = &(const struct simple_bridge_info) { + .connector_type = DRM_MODE_CONNECTOR_HDMIA, + }, + }, { .compatible = "radxa,ra620", .data = &(const struct simple_bridge_info) { .connector_type = DRM_MODE_CONNECTOR_HDMIA, diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 2c5e532410de..a46df7583bcf 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -61,6 +61,14 @@ config DRM_DW_HDMI_QP select DRM_KMS_HELPER select REGMAP_MMIO +config DRM_DW_HDMI_QP_CEC + bool "Synopsis Designware QP CEC interface" + depends on DRM_DW_HDMI_QP + select DRM_DISPLAY_HDMI_CEC_HELPER + help + Support the CEC interface which is part of the Synopsys + Designware HDMI QP block. + config DRM_DW_MIPI_DSI tristate select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c index 9bbfe8da3de0..82aaf74e1bc0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-dp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c @@ -2049,6 +2049,8 @@ struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder, bridge->type = DRM_MODE_CONNECTOR_DisplayPort; bridge->ycbcr_420_allowed = true; + devm_drm_bridge_add(dev, bridge); + dp->aux.dev = dev; dp->aux.drm_dev = encoder->dev; dp->aux.name = dev_name(dev); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c index ab18f9a3bf23..df7a37eb47f4 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c @@ -90,6 +90,11 @@ static int audio_hw_params(struct device *dev, void *data, params->iec.status[0] & IEC958_AES0_NONAUDIO); dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width); + if (daifmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE) + dw_hdmi_set_sample_iec958(dw->data.hdmi, 1); + else + dw_hdmi_set_sample_iec958(dw->data.hdmi, 0); + return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 39332c57f2c5..fe4c026280f0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -18,6 +18,7 @@ #include <drm/bridge/dw_hdmi_qp.h> #include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_cec_helper.h> #include <drm/display/drm_hdmi_state_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -26,6 +27,8 @@ #include <drm/drm_edid.h> #include <drm/drm_modes.h> +#include <media/cec.h> + #include 
<sound/hdmi-codec.h> #include "dw-hdmi-qp.h" @@ -131,17 +134,34 @@ struct dw_hdmi_qp_i2c { bool is_segment; }; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC +struct dw_hdmi_qp_cec { + struct drm_connector *connector; + int irq; + u32 addresses; + struct cec_msg rx_msg; + u8 tx_status; + bool tx_done; + bool rx_done; +}; +#endif + struct dw_hdmi_qp { struct drm_bridge bridge; struct device *dev; struct dw_hdmi_qp_i2c *i2c; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC + struct dw_hdmi_qp_cec *cec; +#endif + struct { const struct dw_hdmi_qp_phy_ops *ops; void *data; } phy; + unsigned long ref_clk_rate; struct regmap *regm; unsigned long tmds_char_rate; @@ -848,8 +868,9 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, return; if (connector->display_info.is_hdmi) { - dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", - __func__, conn_state->hdmi.tmds_char_rate); + dev_dbg(hdmi->dev, "%s mode=HDMI %s rate=%llu bpc=%u\n", __func__, + drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format), + conn_state->hdmi.tmds_char_rate, conn_state->hdmi.output_bpc); op_mode = 0; hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; } else { @@ -965,6 +986,179 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge, } } +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC +static irqreturn_t dw_hdmi_qp_cec_hardirq(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_cec *cec = hdmi->cec; + irqreturn_t ret = IRQ_HANDLED; + u32 stat; + + stat = dw_hdmi_qp_read(hdmi, CEC_INT_STATUS); + if (stat == 0) + return IRQ_NONE; + + dw_hdmi_qp_write(hdmi, stat, CEC_INT_CLEAR); + + if (stat & CEC_STAT_LINE_ERR) { + cec->tx_status = CEC_TX_STATUS_ERROR; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } else if (stat & CEC_STAT_DONE) { + cec->tx_status = CEC_TX_STATUS_OK; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } else if (stat & CEC_STAT_NACK) { + cec->tx_status = CEC_TX_STATUS_NACK; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } + + if (stat & CEC_STAT_EOM) { + unsigned int len, i, val; + + val = dw_hdmi_qp_read(hdmi, CEC_RX_COUNT_STATUS); + len = (val & 0xf) + 1; + + if (len > sizeof(cec->rx_msg.msg)) + len = sizeof(cec->rx_msg.msg); + + for (i = 0; i < 4; i++) { + val = dw_hdmi_qp_read(hdmi, CEC_RX_DATA3_0 + i * 4); + cec->rx_msg.msg[i * 4] = val & 0xff; + cec->rx_msg.msg[i * 4 + 1] = (val >> 8) & 0xff; + cec->rx_msg.msg[i * 4 + 2] = (val >> 16) & 0xff; + cec->rx_msg.msg[i * 4 + 3] = (val >> 24) & 0xff; + } + + dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL); + + cec->rx_msg.len = len; + cec->rx_done = true; + + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +static irqreturn_t dw_hdmi_qp_cec_thread(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + if (cec->tx_done) { + cec->tx_done = false; + drm_connector_hdmi_cec_transmit_attempt_done(cec->connector, + cec->tx_status); + } + + if (cec->rx_done) { + cec->rx_done = false; + drm_connector_hdmi_cec_received_msg(cec->connector, &cec->rx_msg); + } + + return IRQ_HANDLED; +} + +static int dw_hdmi_qp_cec_init(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + cec->connector = connector; + + dw_hdmi_qp_write(hdmi, 0, CEC_TX_COUNT); + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N); + + return devm_request_threaded_irq(hdmi->dev, cec->irq, + dw_hdmi_qp_cec_hardirq, + dw_hdmi_qp_cec_thread, IRQF_SHARED, + 
dev_name(hdmi->dev), hdmi); +} + +static int dw_hdmi_qp_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + if (logical_addr == CEC_LOG_ADDR_INVALID) + cec->addresses = 0; + else + cec->addresses |= BIT(logical_addr) | CEC_ADDR_BROADCAST; + + dw_hdmi_qp_write(hdmi, cec->addresses, CEC_ADDR); + + return 0; +} + +static int dw_hdmi_qp_cec_enable(struct drm_bridge *bridge, bool enable) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + unsigned int irqs; + u32 swdisable; + + if (!enable) { + dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N); + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + + swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE); + swdisable = swdisable | CEC_SWDISABLE; + dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE); + } else { + swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE); + swdisable = swdisable & ~CEC_SWDISABLE; + dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE); + + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL); + + dw_hdmi_qp_cec_log_addr(bridge, CEC_LOG_ADDR_INVALID); + + irqs = CEC_STAT_LINE_ERR | CEC_STAT_NACK | CEC_STAT_EOM | + CEC_STAT_DONE; + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, irqs, CEC_INT_MASK_N); + } + + return 0; +} + +static int dw_hdmi_qp_cec_transmit(struct drm_bridge *bridge, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + unsigned int i; + u32 val; + + for (i = 0; i < msg->len; i++) { + if (!(i % 4)) + val = msg->msg[i]; + if ((i % 4) == 1) + val |= msg->msg[i] << 8; + if ((i % 4) == 2) + val |= msg->msg[i] << 16; + if ((i % 4) == 3) + val |= msg->msg[i] << 24; + + if (i == (msg->len - 1) || (i % 4) == 3) + dw_hdmi_qp_write(hdmi, val, CEC_TX_DATA3_0 + (i / 4) * 4); + } + + dw_hdmi_qp_write(hdmi, msg->len - 1, CEC_TX_COUNT); + dw_hdmi_qp_write(hdmi, CEC_CTRL_START, CEC_TX_CONTROL); + + return 0; +} +#else +#define dw_hdmi_qp_cec_init NULL +#define dw_hdmi_qp_cec_enable NULL +#define dw_hdmi_qp_cec_log_addr NULL +#define dw_hdmi_qp_cec_transmit NULL +#endif /* CONFIG_DRM_DW_HDMI_QP_CEC */ + static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -979,6 +1173,10 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .hdmi_audio_startup = dw_hdmi_qp_audio_enable, .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable, .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare, + .hdmi_cec_init = dw_hdmi_qp_cec_init, + .hdmi_cec_enable = dw_hdmi_qp_cec_enable, + .hdmi_cec_log_addr = dw_hdmi_qp_cec_log_addr, + .hdmi_cec_transmit = dw_hdmi_qp_cec_transmit, }; static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) @@ -1014,13 +1212,11 @@ static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi) { dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N); dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N); - dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0); + dw_hdmi_qp_write(hdmi, hdmi->ref_clk_rate, TIMER_BASE_CONFIG0); /* Software reset */ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); - dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0); - dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0); /* Clear DONE and ERROR interrupts */ @@ -1066,6 +1262,13 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->phy.ops = 
plat_data->phy_ops; hdmi->phy.data = plat_data->phy_data; + if (plat_data->ref_clk_rate) { + hdmi->ref_clk_rate = plat_data->ref_clk_rate; + } else { + hdmi->ref_clk_rate = 428571429; + dev_warn(dev, "Set ref_clk_rate to vendor default\n"); + } + dw_hdmi_qp_init_hw(hdmi); ret = devm_request_threaded_irq(dev, plat_data->main_irq, @@ -1085,6 +1288,12 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->bridge.vendor = "Synopsys"; hdmi->bridge.product = "DW HDMI QP TX"; + if (plat_data->supported_formats) + hdmi->bridge.supported_formats = plat_data->supported_formats; + + if (plat_data->max_bpc) + hdmi->bridge.max_bpc = plat_data->max_bpc; + hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi); if (IS_ERR(hdmi->bridge.ddc)) return ERR_CAST(hdmi->bridge.ddc); @@ -1093,6 +1302,22 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->bridge.hdmi_audio_dev = dev; hdmi->bridge.hdmi_audio_dai_port = 1; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC + if (plat_data->cec_irq) { + hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER; + hdmi->bridge.hdmi_cec_dev = dev; + hdmi->bridge.hdmi_cec_adapter_name = dev_name(dev); + + hdmi->cec = devm_kzalloc(hdmi->dev, sizeof(*hdmi->cec), GFP_KERNEL); + if (!hdmi->cec) + return ERR_PTR(-ENOMEM); + + hdmi->cec->irq = plat_data->cec_irq; + } else { + dev_warn(dev, "Disabled CEC support due to missing IRQ\n"); + } +#endif + ret = devm_drm_bridge_add(dev, &hdmi->bridge); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h index 72987e6c4689..91a15f82e32a 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -488,9 +488,23 @@ #define AUDPKT_VBIT_OVR0 0xf24 /* CEC Registers */ #define CEC_TX_CONTROL 0x1000 +#define CEC_CTRL_CLEAR BIT(0) +#define CEC_CTRL_START BIT(0) #define CEC_STATUS 0x1004 +#define CEC_STAT_DONE BIT(0) +#define CEC_STAT_NACK BIT(1) +#define CEC_STAT_ARBLOST BIT(2) +#define CEC_STAT_LINE_ERR BIT(3) +#define CEC_STAT_RETRANS_FAIL BIT(4) +#define CEC_STAT_DISCARD BIT(5) +#define CEC_STAT_TX_BUSY BIT(8) +#define CEC_STAT_RX_BUSY BIT(9) +#define CEC_STAT_DRIVE_ERR BIT(10) +#define CEC_STAT_EOM BIT(11) +#define CEC_STAT_NOTIFY_ERR BIT(12) #define CEC_CONFIG 0x1008 #define CEC_ADDR 0x100c +#define CEC_ADDR_BROADCAST BIT(15) #define CEC_TX_COUNT 0x1020 #define CEC_TX_DATA3_0 0x1024 #define CEC_TX_DATA7_4 0x1028 diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 206b099a35e9..3b77e73ac0ea 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -177,6 +177,7 @@ struct dw_hdmi { spinlock_t audio_lock; struct mutex audio_mutex; + unsigned int sample_iec958; unsigned int sample_non_pcm; unsigned int sample_width; unsigned int sample_rate; @@ -198,6 +199,12 @@ struct dw_hdmi { enum drm_connector_status last_connector_result; }; +const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi) +{ + return hdmi->plat_data; +} +EXPORT_SYMBOL_GPL(dw_hdmi_to_plat_data); + #define HDMI_IH_PHY_STAT0_RX_SENSE \ (HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \ HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3) @@ -712,6 +719,14 @@ void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm) } EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm); +void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958) +{ + mutex_lock(&hdmi->audio_mutex); + 
hdmi->sample_iec958 = iec958; + mutex_unlock(&hdmi->audio_mutex); +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_iec958); + void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate) { mutex_lock(&hdmi->audio_mutex); @@ -843,7 +858,8 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi) hdmi->channels, hdmi->sample_width, hdmi->sample_rate, - hdmi->sample_non_pcm); + hdmi->sample_non_pcm, + hdmi->sample_iec958); } static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index ae0d08e5e960..276d05d25ad8 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -106,10 +106,21 @@ #define SN_PWM_EN_INV_REG 0xA5 #define SN_PWM_INV_MASK BIT(0) #define SN_PWM_EN_MASK BIT(1) + +#define SN_IRQ_EN_REG 0xE0 +#define IRQ_EN BIT(0) + +#define SN_IRQ_EVENTS_EN_REG 0xE6 +#define HPD_INSERTION_EN BIT(1) +#define HPD_REMOVAL_EN BIT(2) + #define SN_AUX_CMD_STATUS_REG 0xF4 #define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3) #define AUX_IRQ_STATUS_AUX_SHORT BIT(5) #define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6) +#define SN_IRQ_STATUS_REG 0xF5 +#define HPD_REMOVAL_STATUS BIT(2) +#define HPD_INSERTION_STATUS BIT(1) #define MIN_DSI_CLK_FREQ_MHZ 40 @@ -152,7 +163,9 @@ * @ln_assign: Value to program to the LN_ASSIGN register. * @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG. * @comms_enabled: If true then communication over the aux channel is enabled. + * @hpd_enabled: If true then HPD events are enabled. * @comms_mutex: Protects modification of comms_enabled. + * @hpd_mutex: Protects modification of hpd_enabled. * * @gchip: If we expose our GPIOs, this is used. * @gchip_output: A cache of whether we've set GPIOs to output. 
This @@ -190,7 +203,9 @@ struct ti_sn65dsi86 { u8 ln_assign; u8 ln_polrs; bool comms_enabled; + bool hpd_enabled; struct mutex comms_mutex; + struct mutex hpd_mutex; #if defined(CONFIG_OF_GPIO) struct gpio_chip gchip; @@ -221,6 +236,23 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = { .max_register = 0xFF, }; +static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg, + u8 *val) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(pdata->regmap, reg, ®_val); + if (ret) { + dev_err(pdata->dev, "fail to read raw reg %#x: %d\n", + reg, ret); + return ret; + } + *val = (u8)reg_val; + + return 0; +} + static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata, unsigned int reg, u16 *val) { @@ -379,6 +411,7 @@ static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata) static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) { struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev); + const struct i2c_client *client = to_i2c_client(pdata->dev); int ret; ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies); @@ -413,6 +446,13 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) if (pdata->refclk) ti_sn65dsi86_enable_comms(pdata, NULL); + if (client->irq) { + ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN, + IRQ_EN); + if (ret) + dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret); + } + return ret; } @@ -1211,6 +1251,8 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry * static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + const struct i2c_client *client = to_i2c_client(pdata->dev); + int ret; /* * Device needs to be powered on before reading the HPD state @@ -1219,11 +1261,35 @@ static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) */ pm_runtime_get_sync(pdata->dev); + + mutex_lock(&pdata->hpd_mutex); + pdata->hpd_enabled = true; + mutex_unlock(&pdata->hpd_mutex); + + if (client->irq) { + ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG, + HPD_REMOVAL_EN | HPD_INSERTION_EN); + if (ret) + dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret); + } } static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + const struct i2c_client *client = to_i2c_client(pdata->dev); + int ret; + + if (client->irq) { + ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG, + HPD_REMOVAL_EN | HPD_INSERTION_EN); + if (ret) + dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret); + } + + mutex_lock(&pdata->hpd_mutex); + pdata->hpd_enabled = false; + mutex_unlock(&pdata->hpd_mutex); pm_runtime_put_autosuspend(pdata->dev); } @@ -1309,6 +1375,41 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata) return 0; } +static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private) +{ + struct ti_sn65dsi86 *pdata = private; + struct drm_device *dev = pdata->bridge.dev; + u8 status; + int ret; + bool hpd_event; + + ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status); + if (ret) { + dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret); + return IRQ_NONE; + } + + hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS); + + dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status); + if (!status) + return IRQ_NONE; + + ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status); + if (ret) { + dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", 
ret); + return IRQ_NONE; + } + + /* Only send the HPD event if we are bound with a device. */ + mutex_lock(&pdata->hpd_mutex); + if (pdata->hpd_enabled && hpd_event) + drm_kms_helper_hotplug_event(dev); + mutex_unlock(&pdata->hpd_mutex); + + return IRQ_HANDLED; +} + static int ti_sn_bridge_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { @@ -1931,6 +2032,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) dev_set_drvdata(dev, pdata); pdata->dev = dev; + mutex_init(&pdata->hpd_mutex); mutex_init(&pdata->comms_mutex); pdata->regmap = devm_regmap_init_i2c(client, @@ -1971,6 +2073,16 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf))) return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n"); + if (client->irq) { + ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL, + ti_sn_bridge_interrupt, + IRQF_ONESHOT, + dev_name(pdata->dev), pdata); + + if (ret) + return dev_err_probe(dev, ret, "failed to request interrupt\n"); + } + /* * Break ourselves up into a collection of aux devices. The only real * motiviation here is to solve the chicken-and-egg problem of probe diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c index 72480db1f00d..515aceac22b1 100644 --- a/drivers/gpu/drm/clients/drm_client_setup.c +++ b/drivers/gpu/drm/clients/drm_client_setup.c @@ -13,8 +13,8 @@ static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT; module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444); MODULE_PARM_DESC(active, - "Choose which drm client to start, default is" - CONFIG_DRM_CLIENT_DEFAULT "]"); + "Choose which drm client to start, default is " + CONFIG_DRM_CLIENT_DEFAULT); /** * drm_client_setup() - Setup in-kernel DRM clients diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c index f894ba52bdb5..28951e392482 100644 --- a/drivers/gpu/drm/clients/drm_fbdev_client.c +++ b/drivers/gpu/drm/clients/drm_fbdev_client.c @@ -13,22 +13,36 @@ * struct drm_client_funcs */ +static void drm_fbdev_client_free(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); +} + static void drm_fbdev_client_unregister(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); if (fb_helper->info) { + /* + * Fully probed framebuffer device + */ drm_fb_helper_unregister_info(fb_helper); } else { + /* + * Partially initialized client, no framebuffer device yet + */ drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } } -static int drm_fbdev_client_restore(struct drm_client_dev *client) +static int drm_fbdev_client_restore(struct drm_client_dev *client, bool force) { - drm_fb_helper_lastclose(client->dev); + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); return 0; } @@ -62,32 +76,27 @@ err_drm_err: return ret; } -static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +static int drm_fbdev_client_suspend(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - if (holds_console_lock) - drm_fb_helper_set_suspend(fb_helper, true); - else - drm_fb_helper_set_suspend_unlocked(fb_helper, true); + 
drm_fb_helper_set_suspend_unlocked(fb_helper, true); return 0; } -static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock) +static int drm_fbdev_client_resume(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - if (holds_console_lock) - drm_fb_helper_set_suspend(fb_helper, false); - else - drm_fb_helper_set_suspend_unlocked(fb_helper, false); + drm_fb_helper_set_suspend_unlocked(fb_helper, false); return 0; } static const struct drm_client_funcs drm_fbdev_client_funcs = { .owner = THIS_MODULE, + .free = drm_fbdev_client_free, .unregister = drm_fbdev_client_unregister, .restore = drm_fbdev_client_restore, .hotplug = drm_fbdev_client_hotplug, diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c index d239f1e3c456..4d3005273b27 100644 --- a/drivers/gpu/drm/clients/drm_log.c +++ b/drivers/gpu/drm/clients/drm_log.c @@ -100,7 +100,7 @@ static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line) return; iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]); drm_client_buffer_vunmap_local(scanout->buffer); - drm_client_framebuffer_flush(scanout->buffer, &r); + drm_client_buffer_flush(scanout->buffer, &r); } static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s, @@ -133,7 +133,7 @@ static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s, if (scanout->line >= scanout->rows) scanout->line = 0; drm_client_buffer_vunmap_local(scanout->buffer); - drm_client_framebuffer_flush(scanout->buffer, &r); + drm_client_buffer_flush(scanout->buffer, &r); } static void drm_log_draw_new_line(struct drm_log_scanout *scanout, @@ -204,7 +204,7 @@ static int drm_log_setup_modeset(struct drm_client_dev *client, if (format == DRM_FORMAT_INVALID) return -EINVAL; - scanout->buffer = drm_client_framebuffer_create(client, width, height, format); + scanout->buffer = drm_client_buffer_create_dumb(client, width, height, format); if (IS_ERR(scanout->buffer)) { drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n", width, height, &format); @@ -272,7 +272,7 @@ static void drm_log_init_client(struct drm_log *dlog) err_failed_commit: for (i = 0; i < n_modeset; i++) - drm_client_framebuffer_delete(dlog->scanout[i].buffer); + drm_client_buffer_delete(dlog->scanout[i].buffer); err_nomodeset: kfree(dlog->scanout); @@ -286,26 +286,45 @@ static void drm_log_free_scanout(struct drm_client_dev *client) if (dlog->n_scanout) { for (i = 0; i < dlog->n_scanout; i++) - drm_client_framebuffer_delete(dlog->scanout[i].buffer); + drm_client_buffer_delete(dlog->scanout[i].buffer); dlog->n_scanout = 0; kfree(dlog->scanout); dlog->scanout = NULL; } } -static void drm_log_client_unregister(struct drm_client_dev *client) +static void drm_log_client_free(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); struct drm_device *dev = client->dev; + kfree(dlog); + + drm_dbg(dev, "Unregistered with drm log\n"); +} + +static void drm_log_client_unregister(struct drm_client_dev *client) +{ + struct drm_log *dlog = client_to_drm_log(client); + unregister_console(&dlog->con); mutex_lock(&dlog->lock); drm_log_free_scanout(client); - drm_client_release(client); mutex_unlock(&dlog->lock); - kfree(dlog); - drm_dbg(dev, "Unregistered with drm log\n"); + drm_client_release(client); +} + +static int drm_log_client_restore(struct drm_client_dev *client, bool force) +{ + int ret; + + if (force) + ret = 
drm_client_modeset_commit_locked(client); + else + ret = drm_client_modeset_commit(client); + + return ret; } static int drm_log_client_hotplug(struct drm_client_dev *client) @@ -319,7 +338,7 @@ static int drm_log_client_hotplug(struct drm_client_dev *client) return 0; } -static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock) +static int drm_log_client_suspend(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); @@ -328,7 +347,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l return 0; } -static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock) +static int drm_log_client_resume(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); @@ -339,7 +358,9 @@ static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lo static const struct drm_client_funcs drm_log_client_funcs = { .owner = THIS_MODULE, + .free = drm_log_client_free, .unregister = drm_log_client_unregister, + .restore = drm_log_client_restore, .hotplug = drm_log_client_hotplug, .suspend = drm_log_client_suspend, .resume = drm_log_client_resume, diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index baacd21e7341..a2d30cf9e06d 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -137,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector, { struct drm_bridge_connector *bridge_connector = to_drm_bridge_connector(connector); - struct drm_bridge *bridge; /* Notify all bridges in the pipeline of hotplug events. */ - drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) { if (bridge->funcs->hpd_notify) bridge->funcs->hpd_notify(bridge, status); } @@ -619,6 +618,20 @@ static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_f * Bridge Connector Initialisation */ +static void drm_bridge_connector_put_bridges(struct drm_device *dev, void *data) +{ + struct drm_bridge_connector *bridge_connector = (struct drm_bridge_connector *)data; + + drm_bridge_put(bridge_connector->bridge_edid); + drm_bridge_put(bridge_connector->bridge_hpd); + drm_bridge_put(bridge_connector->bridge_detect); + drm_bridge_put(bridge_connector->bridge_modes); + drm_bridge_put(bridge_connector->bridge_hdmi); + drm_bridge_put(bridge_connector->bridge_hdmi_audio); + drm_bridge_put(bridge_connector->bridge_dp_audio); + drm_bridge_put(bridge_connector->bridge_hdmi_cec); +} + /** * drm_bridge_connector_init - Initialise a connector for a chain of bridges * @drm: the DRM device @@ -639,7 +652,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, struct drm_bridge_connector *bridge_connector; struct drm_connector *connector; struct i2c_adapter *ddc = NULL; - struct drm_bridge *bridge, *panel_bridge = NULL; + struct drm_bridge *panel_bridge __free(drm_bridge_put) = NULL; unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); unsigned int max_bpc = 8; bool support_hdcp = false; @@ -650,6 +663,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (!bridge_connector) return ERR_PTR(-ENOMEM); + ret = drmm_add_action(drm, drm_bridge_connector_put_bridges, bridge_connector); + if (ret) + return ERR_PTR(ret); + bridge_connector->encoder = encoder; /* @@ -667,20 +684,28 @@ struct drm_connector 
*drm_bridge_connector_init(struct drm_device *drm, * detection are available, we don't support hotplug detection at all. */ connector_type = DRM_MODE_CONNECTOR_Unknown; - drm_for_each_bridge_in_chain(encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { if (!bridge->interlace_allowed) connector->interlace_allowed = false; if (!bridge->ycbcr_420_allowed) connector->ycbcr_420_allowed = false; - if (bridge->ops & DRM_BRIDGE_OP_EDID) - bridge_connector->bridge_edid = bridge; - if (bridge->ops & DRM_BRIDGE_OP_HPD) - bridge_connector->bridge_hpd = bridge; - if (bridge->ops & DRM_BRIDGE_OP_DETECT) - bridge_connector->bridge_detect = bridge; - if (bridge->ops & DRM_BRIDGE_OP_MODES) - bridge_connector->bridge_modes = bridge; + if (bridge->ops & DRM_BRIDGE_OP_EDID) { + drm_bridge_put(bridge_connector->bridge_edid); + bridge_connector->bridge_edid = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_HPD) { + drm_bridge_put(bridge_connector->bridge_hpd); + bridge_connector->bridge_hpd = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_DETECT) { + drm_bridge_put(bridge_connector->bridge_detect); + bridge_connector->bridge_detect = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_MODES) { + drm_bridge_put(bridge_connector->bridge_modes); + bridge_connector->bridge_modes = drm_bridge_get(bridge); + } if (bridge->ops & DRM_BRIDGE_OP_HDMI) { if (bridge_connector->bridge_hdmi) return ERR_PTR(-EBUSY); @@ -688,7 +713,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->hdmi_clear_infoframe) return ERR_PTR(-EINVAL); - bridge_connector->bridge_hdmi = bridge; + bridge_connector->bridge_hdmi = drm_bridge_get(bridge); if (bridge->supported_formats) supported_formats = bridge->supported_formats; @@ -711,7 +736,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->hdmi_audio_shutdown) return ERR_PTR(-EINVAL); - bridge_connector->bridge_hdmi_audio = bridge; + bridge_connector->bridge_hdmi_audio = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) { @@ -729,21 +754,21 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->dp_audio_shutdown) return ERR_PTR(-EINVAL); - bridge_connector->bridge_dp_audio = bridge; + bridge_connector->bridge_dp_audio = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { if (bridge_connector->bridge_hdmi_cec) return ERR_PTR(-EBUSY); - bridge_connector->bridge_hdmi_cec = bridge; + bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { if (bridge_connector->bridge_hdmi_cec) return ERR_PTR(-EBUSY); - bridge_connector->bridge_hdmi_cec = bridge; + bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge); if (!bridge->funcs->hdmi_cec_enable || !bridge->funcs->hdmi_cec_log_addr || @@ -762,8 +787,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge->ddc) ddc = bridge->ddc; - if (drm_bridge_is_panel(bridge)) - panel_bridge = bridge; + if (drm_bridge_is_panel(bridge)) { + drm_bridge_put(panel_bridge); + panel_bridge = drm_bridge_get(bridge); + } if (bridge->support_hdcp) support_hdcp = true; @@ -818,7 +845,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { - bridge = bridge_connector->bridge_hdmi_cec; + struct drm_bridge *bridge = 
bridge_connector->bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_notifier_register(connector, NULL, @@ -829,7 +856,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { - bridge = bridge_connector->bridge_hdmi_cec; + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_register(connector, &drm_bridge_connector_hdmi_cec_funcs, diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 4aaeae4fa03c..f9fdf19de74a 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -29,6 +29,7 @@ #include <linux/init.h> #include <linux/iopoll.h> #include <linux/kernel.h> +#include <linux/minmax.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/seq_file.h> @@ -123,6 +124,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], } EXPORT_SYMBOL(drm_dp_clock_recovery_ok); +bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); + + return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS; +} +EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress); + u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], int lane) { @@ -2543,6 +2552,10 @@ static const struct dpcd_quirk dpcd_quirk_list[] = { { OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) }, /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */ { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) }, + /* Synaptics Panamera supports only a compressed bpp of 12 above 50% of its max DSC pixel throughput */ + { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x22), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) }, + { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x31), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) }, + { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x33), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) }, }; #undef OUI @@ -2832,6 +2845,158 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S } EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs); +/* + * See DP Standard v2.1a 2.8.4 Minimum Slices/Display, Table 2-159 and + * Appendix L.1 Derivation of Slice Count Requirements. + */ +static int dsc_sink_min_slice_throughput(int peak_pixel_rate) +{ + if (peak_pixel_rate >= 4800000) + return 600000; + else if (peak_pixel_rate >= 2700000) + return 400000; + else + return 340000; +} + +/** + * drm_dp_dsc_sink_max_slice_throughput() - Get a DSC sink's maximum pixel throughput per slice + * @dsc_dpcd: DSC sink's capabilities from DPCD + * @peak_pixel_rate: Cumulative peak pixel rate in kHz + * @is_rgb_yuv444: The mode is either RGB or YUV444 + * + * Return the DSC sink device's maximum pixel throughput per slice, based on + * the device's @dsc_dpcd capabilities, the @peak_pixel_rate of the transferred + * stream(s) and whether the output format @is_rgb_yuv444 or yuv422/yuv420. + * + * Note that @peak_pixel_rate is the total pixel rate transferred to the same + * DSC/display sink. 
For instance to calculate a tile's slice count of an MST + * multi-tiled display sink (not considering here the required + * rounding/alignment of slice count):: + * + * @peak_pixel_rate = tile_pixel_rate * tile_count + * total_slice_count = @peak_pixel_rate / drm_dp_dsc_sink_max_slice_throughput(@peak_pixel_rate) + * tile_slice_count = total_slice_count / tile_count + * + * Returns: + * The maximum pixel throughput per slice supported by the DSC sink device + * in kPixels/sec. + */ +int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + int peak_pixel_rate, bool is_rgb_yuv444) +{ + int throughput; + int delta = 0; + int base; + + throughput = dsc_dpcd[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT]; + + if (is_rgb_yuv444) { + throughput = (throughput & DP_DSC_THROUGHPUT_MODE_0_MASK) >> + DP_DSC_THROUGHPUT_MODE_0_SHIFT; + + delta = ((dsc_dpcd[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT]) & + DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK) >> + DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT; /* in units of 2 MPixels/sec */ + delta *= 2000; + } else { + throughput = (throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> + DP_DSC_THROUGHPUT_MODE_1_SHIFT; + } + + switch (throughput) { + case 0: + return dsc_sink_min_slice_throughput(peak_pixel_rate); + case 1: + base = 340000; + break; + case 2 ... 14: + base = 400000 + 50000 * (throughput - 2); + break; + case 15: + base = 170000; + break; + } + + return base + delta; +} +EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_throughput); + +static u8 dsc_branch_dpcd_cap(const u8 dpcd[DP_DSC_BRANCH_CAP_SIZE], int reg) +{ + return dpcd[reg - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0]; +} + +/** + * drm_dp_dsc_branch_max_overall_throughput() - Branch device's max overall DSC pixel throughput + * @dsc_branch_dpcd: DSC branch capabilities from DPCD + * @is_rgb_yuv444: The mode is either RGB or YUV444 + * + * Return the branch device's maximum overall DSC pixel throughput, based on + * the device's DPCD DSC branch capabilities, and whether the output + * format @is_rgb_yuv444 or yuv422/yuv420. + * + * Returns: + * - 0: The maximum overall throughput capability is not indicated by + * the device separately and it must be determined from the per-slice + * max throughput (see @drm_dp_dsc_branch_slice_max_throughput()) + * and the maximum slice count supported by the device. + * - > 0: The maximum overall DSC pixel throughput supported by the branch + * device in kPixels/sec. + */ +int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE], + bool is_rgb_yuv444) +{ + int throughput; + + if (is_rgb_yuv444) + throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_0); + else + throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_1); + + switch (throughput) { + case 0: + return 0; + case 1: + return 680000; + default: + return 600000 + 50000 * throughput; + } +} +EXPORT_SYMBOL(drm_dp_dsc_branch_max_overall_throughput); + +/** + * drm_dp_dsc_branch_max_line_width() - Branch device's max DSC line width + * @dsc_branch_dpcd: DSC branch capabilities from DPCD + * + * Return the branch device's maximum overall DSC line width, based on + * the device's @dsc_branch_dpcd capabilities. + * + * Returns: + * - 0: The maximum line width is not indicated by the device + * separately and it must be determined from the maximum + * slice count and slice-width supported by the device. + * - %-EINVAL: The device indicates an invalid maximum line width + * (< 5120 pixels). 
+ * - >= 5120: The maximum line width in pixels. + */ +int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE]) +{ + int line_width = dsc_branch_dpcd_cap(dsc_branch_dpcd, DP_DSC_BRANCH_MAX_LINE_WIDTH); + + switch (line_width) { + case 0: + return 0; + case 1 ... 15: + return -EINVAL; + default: + return line_width * 320; + } +} +EXPORT_SYMBOL(drm_dp_dsc_branch_max_line_width); + static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], int address, u8 *buf, int buf_size) @@ -4128,22 +4293,61 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf { int fxp, fxp_min, fxp_max, fxp_actual, f = 1; int ret; - u8 pn, pn_min, pn_max; + u8 pn, pn_min, pn_max, bit_count; if (!bl->aux_set) return 0; - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &bit_count); if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", aux->name, ret); return -ENODEV; } - pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + bit_count &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); + if (ret < 0) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", + aux->name, ret); + return -ENODEV; + } + pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); + if (ret < 0) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", + aux->name, ret); + return -ENODEV; + } + pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + if (unlikely(pn_min > pn_max)) { + drm_dbg_kms(aux->drm_dev, "%s: Invalid pwmgen bit count cap min/max returned: %d %d\n", + aux->name, pn_min, pn_max); + return -EINVAL; + } + + /* + * Per VESA eDP Spec v1.4b, section 3.3.10.2: + * If DP_EDP_PWMGEN_BIT_COUNT is less than DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, + * the sink must use the MIN value as the effective PWM bit count. + * Clamp the reported value to the [MIN, MAX] capability range to ensure + * correct brightness scaling on compliant eDP panels. + * Only enable this logic if the [MIN, MAX] range is valid in regard to Spec. + */ + pn = bit_count; + if (bit_count < pn_min) + pn = clamp(bit_count, pn_min, pn_max); + bl->max = (1 << pn) - 1; - if (!driver_pwm_freq_hz) + if (!driver_pwm_freq_hz) { + if (pn != bit_count) + goto bit_count_write_back; + return 0; + } /* * Set PWM Frequency divider to match desired frequency provided by the driver. @@ -4167,21 +4371,6 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf * - FxP is within 25% of desired value. * Note: 25% is arbitrary value and may need some tweak. 
*/ - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); - if (ret < 0) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", - aux->name, ret); - return 0; - } - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); - if (ret < 0) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", - aux->name, ret); - return 0; - } - pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - /* Ensure frequency is within 25% of desired value */ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); @@ -4199,12 +4388,17 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf break; } +bit_count_write_back: ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", aux->name, ret); return 0; } + + if (!driver_pwm_freq_hz) + return 0; + bl->pwmgen_bit_count = pn; bl->max = (1 << pn) - 1; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index cd15cf52f0c9..67e095e398a3 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -42,6 +42,7 @@ #include <drm/drm_mode.h> #include <drm/drm_print.h> #include <drm/drm_writeback.h> +#include <drm/drm_colorop.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -107,6 +108,7 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state) kfree(state->connectors); kfree(state->crtcs); kfree(state->planes); + kfree(state->colorops); kfree(state->private_objs); } EXPORT_SYMBOL(drm_atomic_state_default_release); @@ -138,6 +140,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) sizeof(*state->planes), GFP_KERNEL); if (!state->planes) goto fail; + state->colorops = kcalloc(dev->mode_config.num_colorop, + sizeof(*state->colorops), GFP_KERNEL); + if (!state->colorops) + goto fail; /* * Because drm_atomic_state can be committed asynchronously we need our @@ -200,6 +206,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) drm_dbg_atomic(dev, "Clearing atomic state %p\n", state); + state->checked = false; + for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i].ptr; @@ -207,9 +215,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; connector->funcs->atomic_destroy_state(connector, - state->connectors[i].state); + state->connectors[i].state_to_destroy); state->connectors[i].ptr = NULL; - state->connectors[i].state = NULL; + state->connectors[i].state_to_destroy = NULL; state->connectors[i].old_state = NULL; state->connectors[i].new_state = NULL; drm_connector_put(connector); @@ -222,10 +230,10 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; crtc->funcs->atomic_destroy_state(crtc, - state->crtcs[i].state); + state->crtcs[i].state_to_destroy); state->crtcs[i].ptr = NULL; - state->crtcs[i].state = NULL; + state->crtcs[i].state_to_destroy = NULL; state->crtcs[i].old_state = NULL; state->crtcs[i].new_state = NULL; @@ -242,20 +250,34 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; plane->funcs->atomic_destroy_state(plane, - state->planes[i].state); + state->planes[i].state_to_destroy); state->planes[i].ptr = NULL; - state->planes[i].state = NULL; + state->planes[i].state_to_destroy = NULL; state->planes[i].old_state = NULL; state->planes[i].new_state = 
NULL; } + for (i = 0; i < config->num_colorop; i++) { + struct drm_colorop *colorop = state->colorops[i].ptr; + + if (!colorop) + continue; + + drm_colorop_atomic_destroy_state(colorop, + state->colorops[i].state); + state->colorops[i].ptr = NULL; + state->colorops[i].state = NULL; + state->colorops[i].old_state = NULL; + state->colorops[i].new_state = NULL; + } + for (i = 0; i < state->num_private_objs; i++) { struct drm_private_obj *obj = state->private_objs[i].ptr; obj->funcs->atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); state->private_objs[i].ptr = NULL; - state->private_objs[i].state = NULL; + state->private_objs[i].state_to_destroy = NULL; state->private_objs[i].old_state = NULL; state->private_objs[i].new_state = NULL; } @@ -348,8 +370,9 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_crtc_state *crtc_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); - crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (crtc_state) return crtc_state; @@ -361,7 +384,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, if (!crtc_state) return ERR_PTR(-ENOMEM); - state->crtcs[index].state = crtc_state; + state->crtcs[index].state_to_destroy = crtc_state; state->crtcs[index].old_state = crtc->state; state->crtcs[index].new_state = crtc_state; state->crtcs[index].ptr = crtc; @@ -480,8 +503,8 @@ static int drm_atomic_connector_check(struct drm_connector *connector, } if (state->crtc) - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, + state->crtc); if (writeback_job->fb && !crtc_state->active) { drm_dbg_atomic(connector->dev, @@ -528,13 +551,14 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane_state *plane_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); /* the legacy pointers should never be set */ WARN_ON(plane->fb); WARN_ON(plane->old_fb); WARN_ON(plane->crtc); - plane_state = drm_atomic_get_existing_plane_state(state, plane); + plane_state = drm_atomic_get_new_plane_state(state, plane); if (plane_state) return plane_state; @@ -546,7 +570,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, if (!plane_state) return ERR_PTR(-ENOMEM); - state->planes[index].state = plane_state; + state->planes[index].state_to_destroy = plane_state; state->planes[index].ptr = plane; state->planes[index].old_state = plane->state; state->planes[index].new_state = plane_state; @@ -568,6 +592,55 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_atomic_get_plane_state); +/** + * drm_atomic_get_colorop_state - get colorop state + * @state: global atomic state object + * @colorop: colorop to get state object for + * + * This function returns the colorop state for the given colorop, allocating it + * if needed. It will also grab the relevant plane lock to make sure that the + * state is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. 
+ */ +struct drm_colorop_state * +drm_atomic_get_colorop_state(struct drm_atomic_state *state, + struct drm_colorop *colorop) +{ + int ret, index = drm_colorop_index(colorop); + struct drm_colorop_state *colorop_state; + + WARN_ON(!state->acquire_ctx); + + colorop_state = drm_atomic_get_new_colorop_state(state, colorop); + if (colorop_state) + return colorop_state; + + ret = drm_modeset_lock(&colorop->plane->mutex, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + colorop_state = drm_atomic_helper_colorop_duplicate_state(colorop); + if (!colorop_state) + return ERR_PTR(-ENOMEM); + + state->colorops[index].state = colorop_state; + state->colorops[index].ptr = colorop; + state->colorops[index].old_state = colorop->state; + state->colorops[index].new_state = colorop_state; + colorop_state->state = state; + + drm_dbg_atomic(colorop->dev, "Added [COLOROP:%d:%d] %p state to %p\n", + colorop->base.id, colorop->type, colorop_state, state); + + return colorop_state; +} +EXPORT_SYMBOL(drm_atomic_get_colorop_state); + static bool plane_switching_crtc(const struct drm_plane_state *old_plane_state, const struct drm_plane_state *new_plane_state) @@ -707,6 +780,46 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, return 0; } +static void drm_atomic_colorop_print_state(struct drm_printer *p, + const struct drm_colorop_state *state) +{ + struct drm_colorop *colorop = state->colorop; + + drm_printf(p, "colorop[%u]:\n", colorop->base.id); + drm_printf(p, "\ttype=%s\n", drm_get_colorop_type_name(colorop->type)); + if (colorop->bypass_property) + drm_printf(p, "\tbypass=%u\n", state->bypass); + + switch (colorop->type) { + case DRM_COLOROP_1D_CURVE: + drm_printf(p, "\tcurve_1d_type=%s\n", + drm_get_colorop_curve_1d_type_name(state->curve_1d_type)); + break; + case DRM_COLOROP_1D_LUT: + drm_printf(p, "\tsize=%d\n", colorop->size); + drm_printf(p, "\tinterpolation=%s\n", + drm_get_colorop_lut1d_interpolation_name(colorop->lut1d_interpolation)); + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); + break; + case DRM_COLOROP_CTM_3X4: + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); + break; + case DRM_COLOROP_MULTIPLIER: + drm_printf(p, "\tmultiplier=%llu\n", state->multiplier); + break; + case DRM_COLOROP_3D_LUT: + drm_printf(p, "\tsize=%d\n", colorop->size); + drm_printf(p, "\tinterpolation=%s\n", + drm_get_colorop_lut3d_interpolation_name(colorop->lut3d_interpolation)); + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); + break; + default: + break; + } + + drm_printf(p, "\tnext=%d\n", colorop->next ? colorop->next->base.id : 0); +} + static void drm_atomic_plane_print_state(struct drm_printer *p, const struct drm_plane_state *state) { @@ -728,7 +841,8 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "\tcolor-range=%s\n", drm_get_color_range_name(state->color_range)); drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); - + drm_printf(p, "\tcolor-pipeline=%d\n", + state->color_pipeline ? 
state->color_pipeline->base.id : 0); if (plane->funcs->atomic_print_state) plane->funcs->atomic_print_state(p, state); } @@ -831,14 +945,17 @@ struct drm_private_state * drm_atomic_get_private_obj_state(struct drm_atomic_state *state, struct drm_private_obj *obj) { - int index, num_objs, i, ret; + int index, num_objs, ret; size_t size; struct __drm_private_objs_state *arr; struct drm_private_state *obj_state; - for (i = 0; i < state->num_private_objs; i++) - if (obj == state->private_objs[i].ptr) - return state->private_objs[i].state; + WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); + + obj_state = drm_atomic_get_new_private_obj_state(state, obj); + if (obj_state) + return obj_state; ret = drm_modeset_lock(&obj->lock, state->acquire_ctx); if (ret) @@ -858,7 +975,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, if (!obj_state) return ERR_PTR(-ENOMEM); - state->private_objs[index].state = obj_state; + state->private_objs[index].state_to_destroy = obj_state; state->private_objs[index].old_state = obj->state; state->private_objs[index].new_state = obj_state; state->private_objs[index].ptr = obj; @@ -1129,6 +1246,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector_state *connector_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); if (ret) @@ -1152,15 +1270,16 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, state->num_connector = alloc; } - if (state->connectors[index].state) - return state->connectors[index].state; + connector_state = drm_atomic_get_new_connector_state(state, connector); + if (connector_state) + return connector_state; connector_state = connector->funcs->atomic_duplicate_state(connector); if (!connector_state) return ERR_PTR(-ENOMEM); drm_connector_get(connector); - state->connectors[index].state = connector_state; + state->connectors[index].state_to_destroy = connector_state; state->connectors[index].old_state = connector->state; state->connectors[index].new_state = connector_state; state->connectors[index].ptr = connector; @@ -1308,7 +1427,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_bridge_state *bridge_state; - struct drm_bridge *bridge; if (!encoder) return 0; @@ -1317,7 +1435,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, "Adding all bridges for [encoder:%d:%s] to %p\n", encoder->base.id, encoder->name, state); - drm_for_each_bridge_in_chain(encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { /* Skip bridges that don't implement the atomic state hooks. */ if (!bridge->funcs->atomic_duplicate_state) continue; @@ -1438,6 +1556,52 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_add_affected_planes); /** + * drm_atomic_add_affected_colorops - add colorops for plane + * @state: atomic state + * @plane: DRM plane + * + * This function walks the current configuration and adds all colorops + * currently used by @plane to the atomic configuration @state. This is useful + * when an atomic commit also needs to check all currently enabled colorop on + * @plane, e.g. when changing the mode. It's also useful when re-enabling a plane + * to avoid special code to force-enable all colorops. 
+ * + * Since acquiring a colorop state will always also acquire the w/w mutex of the + * current plane for that colorop (if there is any) adding all the colorop states for + * a plane will not reduce parallelism of atomic updates. + * + * Returns: + * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK + * then the w/w mutex code has detected a deadlock and the entire atomic + * sequence must be restarted. All other errors are fatal. + */ +int +drm_atomic_add_affected_colorops(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + struct drm_colorop *colorop; + struct drm_colorop_state *colorop_state; + + WARN_ON(!drm_atomic_get_new_plane_state(state, plane)); + + drm_dbg_atomic(plane->dev, + "Adding all current colorops for [PLANE:%d:%s] to %p\n", + plane->base.id, plane->name, state); + + drm_for_each_colorop(colorop, plane->dev) { + if (colorop->plane != plane) + continue; + + colorop_state = drm_atomic_get_colorop_state(state, colorop); + if (IS_ERR(colorop_state)) + return PTR_ERR(colorop_state); + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_add_affected_colorops); + +/** * drm_atomic_check_only - check whether a given config would work * @state: atomic configuration to check * @@ -1541,6 +1705,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) requested_crtc, affected_crtc); } + state->checked = true; + return 0; } EXPORT_SYMBOL(drm_atomic_check_only); @@ -1833,6 +1999,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, bool take_locks) { struct drm_mode_config *config = &dev->mode_config; + struct drm_colorop *colorop; struct drm_plane *plane; struct drm_crtc *crtc; struct drm_connector *connector; @@ -1842,6 +2009,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, if (!drm_drv_uses_atomic_modeset(dev)) return; + list_for_each_entry(colorop, &config->colorop_list, head) { + if (take_locks) + drm_modeset_lock(&colorop->plane->mutex, NULL); + drm_atomic_colorop_print_state(p, colorop->state); + if (take_locks) + drm_modeset_unlock(&colorop->plane->mutex); + } + list_for_each_entry(plane, &config->plane_list, head) { if (take_locks) drm_modeset_lock(&plane->mutex, NULL); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index d5ebe6ea0acb..10adac9397cf 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1831,10 +1831,12 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, } for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { + wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc); + if (!(crtc_mask & drm_crtc_mask(crtc))) continue; - ret = wait_event_timeout(dev->vblank[i].queue, + ret = wait_event_timeout(*queue, state->crtcs[i].last_vblank_count != drm_crtc_vblank_count(crtc), msecs_to_jiffies(100)); @@ -3182,6 +3184,8 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_colorop *colorop; + struct drm_colorop_state *old_colorop_state, *new_colorop_state; struct drm_crtc_commit *commit; struct drm_private_obj *obj; struct drm_private_state *old_obj_state, *new_obj_state; @@ -3236,7 +3240,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_conn_state->state = state; new_conn_state->state = NULL; - state->connectors[i].state = old_conn_state; + state->connectors[i].state_to_destroy = 
old_conn_state; connector->state = new_conn_state; } @@ -3246,7 +3250,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_crtc_state->state = state; new_crtc_state->state = NULL; - state->crtcs[i].state = old_crtc_state; + state->crtcs[i].state_to_destroy = old_crtc_state; crtc->state = new_crtc_state; if (new_crtc_state->commit) { @@ -3259,6 +3263,16 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, } } + for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) { + WARN_ON(colorop->state != old_colorop_state); + + old_colorop_state->state = state; + new_colorop_state->state = NULL; + + state->colorops[i].state = old_colorop_state; + colorop->state = new_colorop_state; + } + drm_panic_lock(state->dev, flags); for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { WARN_ON(plane->state != old_plane_state); @@ -3266,7 +3280,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_plane_state->state = state; new_plane_state->state = NULL; - state->planes[i].state = old_plane_state; + state->planes[i].state_to_destroy = old_plane_state; plane->state = new_plane_state; } drm_panic_unlock(state->dev, flags); @@ -3277,7 +3291,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_obj_state->state = state; new_obj_state->state = NULL; - state->private_objs[i].state = old_obj_state; + state->private_objs[i].state_to_destroy = old_obj_state; obj->state = new_obj_state; } diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 7142e163e618..cee6d8fc44ad 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -268,6 +268,11 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->color_range = val; } + if (plane->color_pipeline_property) { + /* default is always NULL, i.e., bypass */ + plane_state->color_pipeline = NULL; + } + if (plane->zpos_property) { if (!drm_object_property_get_default_value(&plane->base, plane->zpos_property, diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 85dbdaa4a2e2..7320db4b8489 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -35,6 +35,7 @@ #include <drm/drm_drv.h> #include <drm/drm_writeback.h> #include <drm/drm_vblank.h> +#include <drm/drm_colorop.h> #include <linux/export.h> #include <linux/dma-fence.h> @@ -258,6 +259,34 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); /** + * drm_atomic_set_colorop_for_plane - set colorop for plane + * @plane_state: atomic state object for the plane + * @colorop: colorop to use for the plane + * + * Helper function to select the color pipeline on a plane by setting + * it to the first drm_colorop element of the pipeline. 
+ */ +void +drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state, + struct drm_colorop *colorop) +{ + struct drm_plane *plane = plane_state->plane; + + if (colorop) + drm_dbg_atomic(plane->dev, + "Set [COLOROP:%d] for [PLANE:%d:%s] state %p\n", + colorop->base.id, plane->base.id, plane->name, + plane_state); + else + drm_dbg_atomic(plane->dev, + "Set [NOCOLOROP] for [PLANE:%d:%s] state %p\n", + plane->base.id, plane->name, plane_state); + + plane_state->color_pipeline = colorop; +} +EXPORT_SYMBOL(drm_atomic_set_colorop_for_plane); + +/** * drm_atomic_set_crtc_for_connector - set CRTC for connector * @conn_state: atomic state object for the connector * @crtc: CRTC to use for the connector @@ -419,6 +448,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, set_out_fence_for_crtc(state->state, crtc, fence_ptr); } else if (property == crtc->scaling_filter_property) { state->scaling_filter = val; + } else if (property == crtc->sharpness_strength_property) { + state->sharpness_strength = val; } else if (crtc->funcs->atomic_set_property) { return crtc->funcs->atomic_set_property(crtc, state, property, val); } else { @@ -456,6 +487,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc, *val = 0; else if (property == crtc->scaling_filter_property) *val = state->scaling_filter; + else if (property == crtc->sharpness_strength_property) + *val = state->sharpness_strength; else if (crtc->funcs->atomic_get_property) return crtc->funcs->atomic_get_property(crtc, state, property, val); else { @@ -540,6 +573,16 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->color_encoding = val; } else if (property == plane->color_range_property) { state->color_range = val; + } else if (property == plane->color_pipeline_property) { + /* find DRM colorop object */ + struct drm_colorop *colorop = NULL; + + colorop = drm_colorop_find(dev, file_priv, val); + + if (val && !colorop) + return -EACCES; + + drm_atomic_set_colorop_for_plane(state, colorop); } else if (property == config->prop_fb_damage_clips) { ret = drm_property_replace_blob_from_id(dev, &state->fb_damage_clips, @@ -622,6 +665,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->color_encoding; } else if (property == plane->color_range_property) { *val = state->color_range; + } else if (property == plane->color_pipeline_property) { + *val = (state->color_pipeline) ? state->color_pipeline->base.id : 0; } else if (property == config->prop_fb_damage_clips) { *val = (state->fb_damage_clips) ? 
state->fb_damage_clips->base.id : 0; @@ -644,6 +689,96 @@ drm_atomic_plane_get_property(struct drm_plane *plane, return 0; } +static int drm_atomic_color_set_data_property(struct drm_colorop *colorop, + struct drm_colorop_state *state, + struct drm_property *property, + uint64_t val) +{ + ssize_t elem_size = -1; + ssize_t size = -1; + bool replaced = false; + + switch (colorop->type) { + case DRM_COLOROP_1D_LUT: + size = colorop->size * sizeof(struct drm_color_lut32); + break; + case DRM_COLOROP_CTM_3X4: + size = sizeof(struct drm_color_ctm_3x4); + break; + case DRM_COLOROP_3D_LUT: + size = colorop->size * colorop->size * colorop->size * + sizeof(struct drm_color_lut32); + break; + default: + /* should never get here */ + return -EINVAL; + } + + return drm_property_replace_blob_from_id(colorop->dev, + &state->data, + val, + size, + elem_size, + &replaced); +} + +static int drm_atomic_colorop_set_property(struct drm_colorop *colorop, + struct drm_colorop_state *state, + struct drm_file *file_priv, + struct drm_property *property, + uint64_t val) +{ + if (property == colorop->bypass_property) { + state->bypass = val; + } else if (property == colorop->lut1d_interpolation_property) { + colorop->lut1d_interpolation = val; + } else if (property == colorop->curve_1d_type_property) { + state->curve_1d_type = val; + } else if (property == colorop->multiplier_property) { + state->multiplier = val; + } else if (property == colorop->lut3d_interpolation_property) { + colorop->lut3d_interpolation = val; + } else if (property == colorop->data_property) { + return drm_atomic_color_set_data_property(colorop, state, + property, val); + } else { + drm_dbg_atomic(colorop->dev, + "[COLOROP:%d:%d] unknown property [PROP:%d:%s]\n", + colorop->base.id, colorop->type, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +drm_atomic_colorop_get_property(struct drm_colorop *colorop, + const struct drm_colorop_state *state, + struct drm_property *property, uint64_t *val) +{ + if (property == colorop->type_property) + *val = colorop->type; + else if (property == colorop->bypass_property) + *val = state->bypass; + else if (property == colorop->lut1d_interpolation_property) + *val = colorop->lut1d_interpolation; + else if (property == colorop->curve_1d_type_property) + *val = state->curve_1d_type; + else if (property == colorop->multiplier_property) + *val = state->multiplier; + else if (property == colorop->size_property) + *val = colorop->size; + else if (property == colorop->lut3d_interpolation_property) + *val = colorop->lut3d_interpolation; + else if (property == colorop->data_property) + *val = (state->data) ? 
state->data->base.id : 0; + else + return -EINVAL; + + return 0; +} + static int drm_atomic_set_writeback_fb_for_connector( struct drm_connector_state *conn_state, struct drm_framebuffer *fb) @@ -910,6 +1045,15 @@ int drm_atomic_get_property(struct drm_mode_object *obj, plane->state, property, val); break; } + case DRM_MODE_OBJECT_COLOROP: { + struct drm_colorop *colorop = obj_to_colorop(obj); + + if (colorop->plane) + WARN_ON(!drm_modeset_is_locked(&colorop->plane->mutex)); + + ret = drm_atomic_colorop_get_property(colorop, colorop->state, property, val); + break; + } default: drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id); ret = -EINVAL; @@ -1107,6 +1251,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state, ret = drm_atomic_plane_set_property(plane, plane_state, file_priv, prop, prop_value); + + break; + } + case DRM_MODE_OBJECT_COLOROP: { + struct drm_colorop *colorop = obj_to_colorop(obj); + struct drm_colorop_state *colorop_state; + + colorop_state = drm_atomic_get_colorop_state(state, colorop); + if (IS_ERR(colorop_state)) { + ret = PTR_ERR(colorop_state); + break; + } + + ret = drm_atomic_colorop_set_property(colorop, colorop_state, + file_priv, prop, prop_value); break; } default: @@ -1446,6 +1605,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); state->acquire_ctx = &ctx; state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); + state->plane_color_pipeline = file_priv->plane_color_pipeline; retry: copied_objs = 0; diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index d031447eebc9..8f355df883d8 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -197,15 +197,22 @@ * driver. */ +/* Protect bridge_list and bridge_lingering_list */ static DEFINE_MUTEX(bridge_lock); static LIST_HEAD(bridge_list); +static LIST_HEAD(bridge_lingering_list); static void __drm_bridge_free(struct kref *kref) { struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount); + mutex_lock(&bridge_lock); + list_del(&bridge->list); + mutex_unlock(&bridge_lock); + if (bridge->funcs->destroy) bridge->funcs->destroy(bridge); + kfree(bridge->container); } @@ -273,6 +280,7 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, return ERR_PTR(-ENOMEM); bridge = container + offset; + INIT_LIST_HEAD(&bridge->list); bridge->container = container; bridge->funcs = funcs; kref_init(&bridge->refcount); @@ -286,10 +294,13 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, EXPORT_SYMBOL(__devm_drm_bridge_alloc); /** - * drm_bridge_add - add the given bridge to the global bridge list + * drm_bridge_add - register a bridge * * @bridge: bridge control structure * + * Add the given bridge to the global list of bridges, where they can be + * found by users via of_drm_find_bridge(). + * * The bridge to be added must have been allocated by * devm_drm_bridge_alloc(). */ @@ -300,6 +311,14 @@ void drm_bridge_add(struct drm_bridge *bridge) drm_bridge_get(bridge); + /* + * If the bridge was previously added and then removed, it is now + * in bridge_lingering_list. Remove it or bridge_lingering_list will be + * corrupted when adding this bridge to bridge_list below. 
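The bridge hunks above introduce a lingering list so that a removed bridge stays trackable until its refcount drops. A minimal, hedged sketch of the intended alloc/add/remove lifecycle follows; struct my_bridge, my_bridge_funcs and the probe/remove callbacks are hypothetical, while devm_drm_bridge_alloc(), drm_bridge_add() and drm_bridge_remove() are the helpers touched by this patch.

struct my_bridge {
	struct drm_bridge bridge;
};

static const struct drm_bridge_funcs my_bridge_funcs = {
	/* .attach, .detach, ... elided for brevity */
};

static int my_bridge_probe(struct platform_device *pdev)
{
	struct my_bridge *mybr;

	/* Allocation ties the bridge to devres and sets bridge->container. */
	mybr = devm_drm_bridge_alloc(&pdev->dev, struct my_bridge, bridge,
				     &my_bridge_funcs);
	if (IS_ERR(mybr))
		return PTR_ERR(mybr);

	platform_set_drvdata(pdev, mybr);

	/* Publish the bridge so of_drm_find_bridge() can see it. */
	drm_bridge_add(&mybr->bridge);

	return 0;
}

static void my_bridge_shutdown(struct platform_device *pdev)
{
	struct my_bridge *mybr = platform_get_drvdata(pdev);

	/*
	 * Moves the bridge to bridge_lingering_list; the memory is freed
	 * only once the last drm_bridge_put() drops the refcount.
	 */
	drm_bridge_remove(&mybr->bridge);
}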
+ */ + if (!list_empty(&bridge->list)) + list_del_init(&bridge->list); + mutex_init(&bridge->hpd_mutex); if (bridge->ops & DRM_BRIDGE_OP_HDMI) @@ -336,14 +355,19 @@ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge) EXPORT_SYMBOL(devm_drm_bridge_add); /** - * drm_bridge_remove - remove the given bridge from the global bridge list + * drm_bridge_remove - unregister a bridge * * @bridge: bridge control structure + * + * Remove the given bridge from the global list of registered bridges, so + * it won't be found by users via of_drm_find_bridge(), and add it to the + * lingering bridge list, to keep track of it until its allocated memory is + * eventually freed. */ void drm_bridge_remove(struct drm_bridge *bridge) { mutex_lock(&bridge_lock); - list_del_init(&bridge->list); + list_move_tail(&bridge->list, &bridge_lingering_list); mutex_unlock(&bridge_lock); mutex_destroy(&bridge->hpd_mutex); @@ -398,6 +422,9 @@ static bool drm_bridge_is_atomic(struct drm_bridge *bridge) * If non-NULL the previous bridge must be already attached by a call to this * function. * + * The bridge to be attached must have been previously added by + * drm_bridge_add(). + * * Note that bridges attached to encoders are auto-detached during encoder * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally * *not* be balanced with a drm_bridge_detach() in driver code. @@ -414,6 +441,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, if (!encoder || !bridge) return -EINVAL; + if (!bridge->container) + DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n"); + + if (list_empty(&bridge->list)) + DRM_WARN("Missing drm_bridge_add() before attach\n"); + drm_bridge_get(bridge); if (previous && (!previous->dev || previous->encoder != encoder)) { @@ -1062,12 +1095,12 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, struct drm_encoder *encoder = bridge->encoder; struct drm_bridge_state *last_bridge_state; unsigned int i, num_out_bus_fmts = 0; - struct drm_bridge *last_bridge; u32 *out_bus_fmts; int ret = 0; - last_bridge = list_last_entry(&encoder->bridge_chain, - struct drm_bridge, chain_node); + struct drm_bridge *last_bridge __free(drm_bridge_put) = + drm_bridge_get(list_last_entry(&encoder->bridge_chain, + struct drm_bridge, chain_node)); last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, last_bridge); @@ -1121,7 +1154,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_bridge_state *bridge_state, *next_bridge_state; - struct drm_bridge *next_bridge; u32 output_flags = 0; bridge_state = drm_atomic_get_new_bridge_state(state, bridge); @@ -1130,7 +1162,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, if (!bridge_state) return; - next_bridge = drm_bridge_get_next_bridge(bridge); + struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge); /* * Let's try to apply the most common case here, that is, propagate @@ -1432,17 +1464,20 @@ EXPORT_SYMBOL(devm_drm_put_bridge); static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, struct drm_bridge *bridge, - unsigned int idx) + unsigned int idx, + bool lingering) { drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); - drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount)); + drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount), + lingering ? 
" [lingering]" : ""); drm_printf(p, "\ttype: [%d] %s\n", bridge->type, drm_get_connector_type_name(bridge->type)); - if (bridge->of_node) + /* The OF node could be freed after drm_bridge_remove() */ + if (bridge->of_node && !lingering) drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node); drm_printf(p, "\tops: [0x%x]", bridge->ops); @@ -1468,7 +1503,10 @@ static int allbridges_show(struct seq_file *m, void *data) mutex_lock(&bridge_lock); list_for_each_entry(bridge, &bridge_list, list) - drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); + + list_for_each_entry(bridge, &bridge_lingering_list, list) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true); mutex_unlock(&bridge_lock); @@ -1480,11 +1518,10 @@ static int encoder_bridges_show(struct seq_file *m, void *data) { struct drm_encoder *encoder = m->private; struct drm_printer p = drm_seq_file_printer(m); - struct drm_bridge *bridge; unsigned int idx = 0; - drm_for_each_bridge_in_chain(encoder, bridge) - drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + drm_for_each_bridge_in_chain_scoped(encoder, bridge) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); return 0; } diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index a94061f373de..2f279b46bd2c 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -11,9 +11,19 @@ #include <linux/sizes.h> #include <drm/drm_buddy.h> +#include <drm/drm_print.h> + +enum drm_buddy_free_tree { + DRM_BUDDY_CLEAR_TREE = 0, + DRM_BUDDY_DIRTY_TREE, + DRM_BUDDY_MAX_FREE_TREES, +}; static struct kmem_cache *slab_blocks; +#define for_each_free_tree(tree) \ + for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++) + static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, struct drm_buddy_block *parent, unsigned int order, @@ -31,6 +41,8 @@ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, block->header |= order; block->parent = parent; + RB_CLEAR_NODE(&block->rb); + BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED); return block; } @@ -41,23 +53,64 @@ static void drm_block_free(struct drm_buddy *mm, kmem_cache_free(slab_blocks, block); } -static void list_insert_sorted(struct drm_buddy *mm, - struct drm_buddy_block *block) +static enum drm_buddy_free_tree +get_block_tree(struct drm_buddy_block *block) { - struct drm_buddy_block *node; - struct list_head *head; + return drm_buddy_block_is_clear(block) ? + DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; +} - head = &mm->free_list[drm_buddy_block_order(block)]; - if (list_empty(head)) { - list_add(&block->link, head); - return; - } +static struct drm_buddy_block * +rbtree_get_free_block(const struct rb_node *node) +{ + return node ? 
rb_entry(node, struct drm_buddy_block, rb) : NULL; +} - list_for_each_entry(node, head, link) - if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node)) - break; +static struct drm_buddy_block * +rbtree_last_free_block(struct rb_root *root) +{ + return rbtree_get_free_block(rb_last(root)); +} - __list_add(&block->link, node->link.prev, &node->link); +static bool rbtree_is_empty(struct rb_root *root) +{ + return RB_EMPTY_ROOT(root); +} + +static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block, + const struct drm_buddy_block *node) +{ + return drm_buddy_block_offset(block) < drm_buddy_block_offset(node); +} + +static bool rbtree_block_offset_less(struct rb_node *block, + const struct rb_node *node) +{ + return drm_buddy_block_offset_less(rbtree_get_free_block(block), + rbtree_get_free_block(node)); +} + +static void rbtree_insert(struct drm_buddy *mm, + struct drm_buddy_block *block, + enum drm_buddy_free_tree tree) +{ + rb_add(&block->rb, + &mm->free_trees[tree][drm_buddy_block_order(block)], + rbtree_block_offset_less); +} + +static void rbtree_remove(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + unsigned int order = drm_buddy_block_order(block); + enum drm_buddy_free_tree tree; + struct rb_root *root; + + tree = get_block_tree(block); + root = &mm->free_trees[tree][order]; + + rb_erase(&block->rb, root); + RB_CLEAR_NODE(&block->rb); } static void clear_reset(struct drm_buddy_block *block) @@ -70,29 +123,34 @@ static void mark_cleared(struct drm_buddy_block *block) block->header |= DRM_BUDDY_HEADER_CLEAR; } -static void mark_allocated(struct drm_buddy_block *block) +static void mark_allocated(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_ALLOCATED; - list_del(&block->link); + rbtree_remove(mm, block); } static void mark_free(struct drm_buddy *mm, struct drm_buddy_block *block) { + enum drm_buddy_free_tree tree; + block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_FREE; - list_insert_sorted(mm, block); + tree = get_block_tree(block); + rbtree_insert(mm, block, tree); } -static void mark_split(struct drm_buddy_block *block) +static void mark_split(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_SPLIT; - list_del(&block->link); + rbtree_remove(mm, block); } static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) @@ -148,7 +206,7 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm, mark_cleared(parent); } - list_del(&buddy->link); + rbtree_remove(mm, buddy); if (force_merge && drm_buddy_block_is_clear(buddy)) mm->clear_avail -= drm_buddy_block_size(mm, buddy); @@ -169,7 +227,7 @@ static int __force_merge(struct drm_buddy *mm, u64 end, unsigned int min_order) { - unsigned int order; + unsigned int tree, order; int i; if (!min_order) @@ -178,44 +236,48 @@ static int __force_merge(struct drm_buddy *mm, if (min_order > mm->max_order) return -EINVAL; - for (i = min_order - 1; i >= 0; i--) { - struct drm_buddy_block *block, *prev; + for_each_free_tree(tree) { + for (i = min_order - 1; i >= 0; i--) { + struct rb_node *iter = rb_last(&mm->free_trees[tree][i]); - list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) { - struct drm_buddy_block *buddy; - u64 block_start, block_end; + while (iter) { + struct drm_buddy_block *block, *buddy; + u64 block_start, block_end; - if (!block->parent) - continue; + block = rbtree_get_free_block(iter); + iter = 
rb_prev(iter); - block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block) - 1; + if (!block || !block->parent) + continue; - if (!contains(start, end, block_start, block_end)) - continue; + block_start = drm_buddy_block_offset(block); + block_end = block_start + drm_buddy_block_size(mm, block) - 1; - buddy = __get_buddy(block); - if (!drm_buddy_block_is_free(buddy)) - continue; + if (!contains(start, end, block_start, block_end)) + continue; - WARN_ON(drm_buddy_block_is_clear(block) == - drm_buddy_block_is_clear(buddy)); + buddy = __get_buddy(block); + if (!drm_buddy_block_is_free(buddy)) + continue; - /* - * If the prev block is same as buddy, don't access the - * block in the next iteration as we would free the - * buddy block as part of the free function. - */ - if (prev == buddy) - prev = list_prev_entry(prev, link); + WARN_ON(drm_buddy_block_is_clear(block) == + drm_buddy_block_is_clear(buddy)); - list_del(&block->link); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + /* + * Advance to the next node when the current node is the buddy, + * as freeing the block will also remove its buddy from the tree. + */ + if (iter == &buddy->rb) + iter = rb_prev(iter); - order = __drm_buddy_free(mm, block, true); - if (order >= min_order) - return 0; + rbtree_remove(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); + + order = __drm_buddy_free(mm, block, true); + if (order >= min_order) + return 0; + } } } @@ -236,8 +298,8 @@ static int __force_merge(struct drm_buddy *mm, */ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) { - unsigned int i; - u64 offset; + unsigned int i, j, root_count = 0; + u64 offset = 0; if (size < chunk_size) return -EINVAL; @@ -258,14 +320,22 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); - mm->free_list = kmalloc_array(mm->max_order + 1, - sizeof(struct list_head), - GFP_KERNEL); - if (!mm->free_list) + mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES, + sizeof(*mm->free_trees), + GFP_KERNEL); + if (!mm->free_trees) return -ENOMEM; - for (i = 0; i <= mm->max_order; ++i) - INIT_LIST_HEAD(&mm->free_list[i]); + for_each_free_tree(i) { + mm->free_trees[i] = kmalloc_array(mm->max_order + 1, + sizeof(struct rb_root), + GFP_KERNEL); + if (!mm->free_trees[i]) + goto out_free_tree; + + for (j = 0; j <= mm->max_order; ++j) + mm->free_trees[i][j] = RB_ROOT; + } mm->n_roots = hweight64(size); @@ -273,10 +343,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) sizeof(struct drm_buddy_block *), GFP_KERNEL); if (!mm->roots) - goto out_free_list; - - offset = 0; - i = 0; + goto out_free_tree; /* * Split into power-of-two blocks, in case we are given a size that is @@ -296,24 +363,26 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) mark_free(mm, root); - BUG_ON(i > mm->max_order); + BUG_ON(root_count > mm->max_order); BUG_ON(drm_buddy_block_size(mm, root) < chunk_size); - mm->roots[i] = root; + mm->roots[root_count] = root; offset += root_size; size -= root_size; - i++; + root_count++; } while (size); return 0; out_free_roots: - while (i--) - drm_block_free(mm, mm->roots[i]); + while (root_count--) + drm_block_free(mm, mm->roots[root_count]); kfree(mm->roots); -out_free_list: - kfree(mm->free_list); +out_free_tree: + while (i--) + kfree(mm->free_trees[i]); + kfree(mm->free_trees); return -ENOMEM; } 
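The rbtree conversion above changes only the allocator's internal bookkeeping; callers keep using the existing entry points. The sketch below shows typical driver usage under that assumption (the exact drm_buddy_alloc_blocks()/drm_buddy_free_list() signatures are not part of this excerpt, so treat them as assumed; the my_vram_* name and sizes are made up).

/*
 * Illustrative allocation from a buddy manager whose free blocks are now
 * kept in per-order clear/dirty rbtrees.
 */
static int my_vram_alloc_example(struct drm_buddy *mm)
{
	LIST_HEAD(blocks);
	int err;

	/* Ask for 8 MiB anywhere in the range, preferring already-clear blocks. */
	err = drm_buddy_alloc_blocks(mm, 0, mm->size, 8ull << 20,
				     mm->chunk_size, &blocks,
				     DRM_BUDDY_CLEAR_ALLOCATION);
	if (err)
		return err;

	/* ... use the blocks ... */

	/* Return the blocks as dirty; they land in the dirty free tree. */
	drm_buddy_free_list(mm, &blocks, 0);

	return 0;
}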
EXPORT_SYMBOL(drm_buddy_init); @@ -323,7 +392,7 @@ EXPORT_SYMBOL(drm_buddy_init); * * @mm: DRM buddy manager to free * - * Cleanup memory manager resources and the freelist + * Cleanup memory manager resources and the freetree */ void drm_buddy_fini(struct drm_buddy *mm) { @@ -349,8 +418,9 @@ void drm_buddy_fini(struct drm_buddy *mm) WARN_ON(mm->avail != mm->size); + for_each_free_tree(i) + kfree(mm->free_trees[i]); kfree(mm->roots); - kfree(mm->free_list); } EXPORT_SYMBOL(drm_buddy_fini); @@ -374,8 +444,7 @@ static int split_block(struct drm_buddy *mm, return -ENOMEM; } - mark_free(mm, block->left); - mark_free(mm, block->right); + mark_split(mm, block); if (drm_buddy_block_is_clear(block)) { mark_cleared(block->left); @@ -383,7 +452,8 @@ static int split_block(struct drm_buddy *mm, clear_reset(block); } - mark_split(block); + mark_free(mm, block->left); + mark_free(mm, block->right); return 0; } @@ -412,10 +482,11 @@ EXPORT_SYMBOL(drm_get_buddy); * @is_clear: blocks clear state * * Reset the clear state based on @is_clear value for each block - * in the freelist. + * in the freetree. */ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) { + enum drm_buddy_free_tree src_tree, dst_tree; u64 root_size, size, start; unsigned int order; int i; @@ -430,19 +501,24 @@ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) size -= root_size; } + src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + for (i = 0; i <= mm->max_order; ++i) { - struct drm_buddy_block *block; - - list_for_each_entry_reverse(block, &mm->free_list[i], link) { - if (is_clear != drm_buddy_block_is_clear(block)) { - if (is_clear) { - mark_cleared(block); - mm->clear_avail += drm_buddy_block_size(mm, block); - } else { - clear_reset(block); - mm->clear_avail -= drm_buddy_block_size(mm, block); - } + struct rb_root *root = &mm->free_trees[src_tree][i]; + struct drm_buddy_block *block, *tmp; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + rbtree_remove(mm, block); + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); } + + rbtree_insert(mm, block, dst_tree); } } } @@ -632,23 +708,17 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm, } static struct drm_buddy_block * -get_maxblock(struct drm_buddy *mm, unsigned int order, - unsigned long flags) +get_maxblock(struct drm_buddy *mm, + unsigned int order, + enum drm_buddy_free_tree tree) { struct drm_buddy_block *max_block = NULL, *block = NULL; + struct rb_root *root; unsigned int i; for (i = order; i <= mm->max_order; ++i) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + root = &mm->free_trees[tree][i]; + block = rbtree_last_free_block(root); if (!block) continue; @@ -667,46 +737,44 @@ get_maxblock(struct drm_buddy *mm, unsigned int order, } static struct drm_buddy_block * -alloc_from_freelist(struct drm_buddy *mm, +alloc_from_freetree(struct drm_buddy *mm, unsigned int order, unsigned long flags) { struct drm_buddy_block *block = NULL; + struct rb_root *root; + enum drm_buddy_free_tree tree; unsigned int tmp; int err; + tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ? 
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { - block = get_maxblock(mm, order, flags); + block = get_maxblock(mm, order, tree); if (block) /* Store the obtained block order */ tmp = drm_buddy_block_order(block); } else { for (tmp = order; tmp <= mm->max_order; ++tmp) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + /* Get RB tree root for this order and tree */ + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); if (block) break; } } if (!block) { - /* Fallback method */ + /* Try allocating from the other tree */ + tree = (tree == DRM_BUDDY_CLEAR_TREE) ? + DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + for (tmp = order; tmp <= mm->max_order; ++tmp) { - if (!list_empty(&mm->free_list[tmp])) { - block = list_last_entry(&mm->free_list[tmp], - struct drm_buddy_block, - link); - if (block) - break; - } + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); + if (block) + break; } if (!block) @@ -771,7 +839,7 @@ static int __alloc_range(struct drm_buddy *mm, if (contains(start, end, block_start, block_end)) { if (drm_buddy_block_is_free(block)) { - mark_allocated(block); + mark_allocated(mm, block); total_allocated += drm_buddy_block_size(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) @@ -849,10 +917,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, { u64 rhs_offset, lhs_offset, lhs_size, filled; struct drm_buddy_block *block; - struct list_head *list; + unsigned int tree, order; LIST_HEAD(blocks_lhs); unsigned long pages; - unsigned int order; u64 modify_size; int err; @@ -862,35 +929,45 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, if (order == 0) return -ENOSPC; - list = &mm->free_list[order]; - if (list_empty(list)) - return -ENOSPC; + for_each_free_tree(tree) { + struct rb_root *root; + struct rb_node *iter; + + root = &mm->free_trees[tree][order]; + if (rbtree_is_empty(root)) + continue; - list_for_each_entry_reverse(block, list, link) { - /* Allocate blocks traversing RHS */ - rhs_offset = drm_buddy_block_offset(block); - err = __drm_buddy_alloc_range(mm, rhs_offset, size, - &filled, blocks); - if (!err || err != -ENOSPC) - return err; - - lhs_size = max((size - filled), min_block_size); - if (!IS_ALIGNED(lhs_size, min_block_size)) - lhs_size = round_up(lhs_size, min_block_size); - - /* Allocate blocks traversing LHS */ - lhs_offset = drm_buddy_block_offset(block) - lhs_size; - err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, - NULL, &blocks_lhs); - if (!err) { - list_splice(&blocks_lhs, blocks); - return 0; - } else if (err != -ENOSPC) { + iter = rb_last(root); + while (iter) { + block = rbtree_get_free_block(iter); + + /* Allocate blocks traversing RHS */ + rhs_offset = drm_buddy_block_offset(block); + err = __drm_buddy_alloc_range(mm, rhs_offset, size, + &filled, blocks); + if (!err || err != -ENOSPC) + return err; + + lhs_size = max((size - filled), min_block_size); + if (!IS_ALIGNED(lhs_size, min_block_size)) + lhs_size = round_up(lhs_size, min_block_size); + + /* Allocate blocks traversing LHS */ + lhs_offset = drm_buddy_block_offset(block) - lhs_size; + err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, + NULL, &blocks_lhs); + if (!err) { + list_splice(&blocks_lhs, blocks); + return 0; + } else if (err != -ENOSPC) { + 
drm_buddy_free_list_internal(mm, blocks); + return err; + } + /* Free blocks for the next iteration */ drm_buddy_free_list_internal(mm, blocks); - return err; + + iter = rb_prev(iter); } - /* Free blocks for the next iteration */ - drm_buddy_free_list_internal(mm, blocks); } return -ENOSPC; @@ -976,7 +1053,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm, list_add(&block->tmp_link, &dfs); err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); if (err) { - mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -999,8 +1076,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, return __drm_buddy_alloc_range_bias(mm, start, end, order, flags); else - /* Allocate from freelist */ - return alloc_from_freelist(mm, order, flags); + /* Allocate from freetree */ + return alloc_from_freetree(mm, order, flags); } /** @@ -1017,8 +1094,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, * alloc_range_bias() called on range limitations, which traverses * the tree and returns the desired block. * - * alloc_from_freelist() called when *no* range restrictions - * are enforced, which picks the block from the freelist. + * alloc_from_freetree() called when *no* range restrictions + * are enforced, which picks the block from the freetree. * * Returns: * 0 on success, error code on failure. @@ -1120,7 +1197,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, } } while (1); - mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -1201,12 +1278,18 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); for (order = mm->max_order; order >= 0; order--) { - struct drm_buddy_block *block; + struct drm_buddy_block *block, *tmp; + struct rb_root *root; u64 count = 0, free; + unsigned int tree; - list_for_each_entry(block, &mm->free_list[order], link) { - BUG_ON(!drm_buddy_block_is_free(block)); - count++; + for_each_free_tree(tree) { + root = &mm->free_trees[tree][order]; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + BUG_ON(!drm_buddy_block_is_free(block)); + count++; + } } drm_printf(p, "order-%2d ", order); diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 3fa38d4ac70b..a82d741e6630 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -11,12 +11,14 @@ #include <linux/slab.h> #include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_mode.h> #include <drm/drm_print.h> @@ -168,29 +170,59 @@ void drm_client_release(struct drm_client_dev *client) drm_client_modeset_free(client); drm_client_close(client); + + if (client->funcs && client->funcs->free) + client->funcs->free(client); + drm_dev_put(dev); } EXPORT_SYMBOL(drm_client_release); -static void drm_client_buffer_delete(struct drm_client_buffer *buffer) +/** + * drm_client_buffer_delete - Delete a client buffer + * @buffer: DRM client buffer + */ +void drm_client_buffer_delete(struct drm_client_buffer *buffer) { - if (buffer->gem) { - drm_gem_vunmap(buffer->gem, &buffer->map); - 
drm_gem_object_put(buffer->gem); - } + struct drm_gem_object *gem; + int ret; + + if (!buffer) + return; + + gem = buffer->fb->obj[0]; + drm_gem_vunmap(gem, &buffer->map); + + ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file); + if (ret) + drm_err(buffer->client->dev, + "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret); + + drm_gem_object_put(buffer->gem); kfree(buffer); } +EXPORT_SYMBOL(drm_client_buffer_delete); static struct drm_client_buffer * drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, - u32 format, u32 *handle) + u32 format, u32 handle, u32 pitch) { - const struct drm_format_info *info = drm_format_info(format); - struct drm_mode_create_dumb dumb_args = { }; + struct drm_mode_fb_cmd2 fb_req = { + .width = width, + .height = height, + .pixel_format = format, + .handles = { + handle, + }, + .pitches = { + pitch, + }, + }; struct drm_device *dev = client->dev; struct drm_client_buffer *buffer; struct drm_gem_object *obj; + struct drm_framebuffer *fb; int ret; buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); @@ -199,28 +231,38 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, buffer->client = client; - dumb_args.width = width; - dumb_args.height = height; - dumb_args.bpp = drm_format_info_bpp(info, 0); - ret = drm_mode_create_dumb(dev, &dumb_args, client->file); - if (ret) - goto err_delete; - - obj = drm_gem_object_lookup(client->file, dumb_args.handle); + obj = drm_gem_object_lookup(client->file, handle); if (!obj) { ret = -ENOENT; goto err_delete; } - buffer->pitch = dumb_args.pitch; + ret = drm_mode_addfb2(dev, &fb_req, client->file); + if (ret) + goto err_drm_gem_object_put; + + fb = drm_framebuffer_lookup(dev, client->file, fb_req.fb_id); + if (drm_WARN_ON(dev, !fb)) { + ret = -ENOENT; + goto err_drm_mode_rmfb; + } + + /* drop the reference we picked up in framebuffer lookup */ + drm_framebuffer_put(fb); + + strscpy(fb->comm, client->name, TASK_COMM_LEN); + buffer->gem = obj; - *handle = dumb_args.handle; + buffer->fb = fb; return buffer; +err_drm_mode_rmfb: + drm_mode_rmfb(dev, fb_req.fb_id, client->file); +err_drm_gem_object_put: + drm_gem_object_put(obj); err_delete: - drm_client_buffer_delete(buffer); - + kfree(buffer); return ERR_PTR(ret); } @@ -247,7 +289,7 @@ err_delete: int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { - struct drm_gem_object *gem = buffer->gem; + struct drm_gem_object *gem = buffer->fb->obj[0]; struct iosys_map *map = &buffer->map; int ret; @@ -276,7 +318,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap_local); */ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer) { - struct drm_gem_object *gem = buffer->gem; + struct drm_gem_object *gem = buffer->fb->obj[0]; struct iosys_map *map = &buffer->map; drm_gem_vunmap_locked(gem, map); @@ -307,9 +349,10 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local); int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { + struct drm_gem_object *gem = buffer->fb->obj[0]; int ret; - ret = drm_gem_vmap(buffer->gem, &buffer->map); + ret = drm_gem_vmap(gem, &buffer->map); if (ret) return ret; *map_copy = buffer->map; @@ -328,57 +371,14 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { - drm_gem_vunmap(buffer->gem, &buffer->map); -} -EXPORT_SYMBOL(drm_client_buffer_vunmap); + struct drm_gem_object *gem = buffer->fb->obj[0]; -static void drm_client_buffer_rmfb(struct 
drm_client_buffer *buffer) -{ - int ret; - - if (!buffer->fb) - return; - - ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file); - if (ret) - drm_err(buffer->client->dev, - "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret); - - buffer->fb = NULL; -} - -static int drm_client_buffer_addfb(struct drm_client_buffer *buffer, - u32 width, u32 height, u32 format, - u32 handle) -{ - struct drm_client_dev *client = buffer->client; - struct drm_mode_fb_cmd2 fb_req = { }; - int ret; - - fb_req.width = width; - fb_req.height = height; - fb_req.pixel_format = format; - fb_req.handles[0] = handle; - fb_req.pitches[0] = buffer->pitch; - - ret = drm_mode_addfb2(client->dev, &fb_req, client->file); - if (ret) - return ret; - - buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id); - if (WARN_ON(!buffer->fb)) - return -ENOENT; - - /* drop the reference we picked up in framebuffer lookup */ - drm_framebuffer_put(buffer->fb); - - strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN); - - return 0; + drm_gem_vunmap(gem, &buffer->map); } +EXPORT_SYMBOL(drm_client_buffer_vunmap); /** - * drm_client_framebuffer_create - Create a client framebuffer + * drm_client_buffer_create_dumb - Create a client buffer backed by a dumb buffer * @client: DRM client * @width: Framebuffer width * @height: Framebuffer height @@ -386,24 +386,33 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer, * * This function creates a &drm_client_buffer which consists of a * &drm_framebuffer backed by a dumb buffer. - * Call drm_client_framebuffer_delete() to free the buffer. + * Call drm_client_buffer_delete() to free the buffer. * * Returns: * Pointer to a client buffer or an error pointer on failure. */ struct drm_client_buffer * -drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format) +drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format) { + const struct drm_format_info *info = drm_format_info(format); + struct drm_device *dev = client->dev; + struct drm_mode_create_dumb dumb_args = { }; struct drm_client_buffer *buffer; - u32 handle; int ret; - buffer = drm_client_buffer_create(client, width, height, format, - &handle); - if (IS_ERR(buffer)) - return buffer; + dumb_args.width = width; + dumb_args.height = height; + dumb_args.bpp = drm_format_info_bpp(info, 0); + ret = drm_mode_create_dumb(dev, &dumb_args, client->file); + if (ret) + return ERR_PTR(ret); - ret = drm_client_buffer_addfb(buffer, width, height, format, handle); + buffer = drm_client_buffer_create(client, width, height, format, + dumb_args.handle, dumb_args.pitch); + if (IS_ERR(buffer)) { + ret = PTR_ERR(buffer); + goto err_drm_mode_destroy_dumb; + } /* * The handle is only needed for creating the framebuffer, destroy it @@ -411,34 +420,19 @@ drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 heig * object as DMA-buf. The framebuffer and our buffer structure are still * holding references to the GEM object to prevent its destruction. 
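Putting the reworked client-buffer API together, a minimal sketch of a client allocating, drawing into, flushing and deleting a dumb-buffer-backed framebuffer follows. The function names and signatures come from the hunks above; the my_client_* helper, resolution and format are illustrative only.

static int my_client_draw_once(struct drm_client_dev *client)
{
	struct drm_client_buffer *buffer;
	struct iosys_map map;
	int ret;

	buffer = drm_client_buffer_create_dumb(client, 1024, 768,
					       DRM_FORMAT_XRGB8888);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret)
		goto err_delete;

	/* Clear the whole buffer to black. */
	iosys_map_memset(&map, 0, 0, 1024 * 768 * 4);
	drm_client_buffer_vunmap(buffer);

	/* Push the content out; NULL flushes the full framebuffer. */
	ret = drm_client_buffer_flush(buffer, NULL);

err_delete:
	drm_client_buffer_delete(buffer);
	return ret;
}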
*/ - drm_mode_destroy_dumb(client->dev, handle, client->file); - - if (ret) { - drm_client_buffer_delete(buffer); - return ERR_PTR(ret); - } + drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file); return buffer; -} -EXPORT_SYMBOL(drm_client_framebuffer_create); - -/** - * drm_client_framebuffer_delete - Delete a client framebuffer - * @buffer: DRM client buffer (can be NULL) - */ -void drm_client_framebuffer_delete(struct drm_client_buffer *buffer) -{ - if (!buffer) - return; - drm_client_buffer_rmfb(buffer); - drm_client_buffer_delete(buffer); +err_drm_mode_destroy_dumb: + drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file); + return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_client_framebuffer_delete); +EXPORT_SYMBOL(drm_client_buffer_create_dumb); /** - * drm_client_framebuffer_flush - Manually flush client framebuffer - * @buffer: DRM client buffer (can be NULL) + * drm_client_buffer_flush - Manually flush client buffer + * @buffer: DRM client buffer * @rect: Damage rectangle (if NULL flushes all) * * This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes @@ -447,7 +441,7 @@ EXPORT_SYMBOL(drm_client_framebuffer_delete); * Returns: * Zero on success or negative error code on failure. */ -int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect) +int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect) { if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty) return 0; @@ -467,4 +461,4 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file, 0, 0, NULL, 0); } -EXPORT_SYMBOL(drm_client_framebuffer_flush); +EXPORT_SYMBOL(drm_client_buffer_flush); diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c index c83196ad8b59..7b3e362f7926 100644 --- a/drivers/gpu/drm/drm_client_event.c +++ b/drivers/gpu/drm/drm_client_event.c @@ -39,12 +39,13 @@ void drm_client_dev_unregister(struct drm_device *dev) mutex_lock(&dev->clientlist_mutex); list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { list_del(&client->list); - if (client->funcs && client->funcs->unregister) { + /* + * Unregistering consumes and frees the client. 
+ */ + if (client->funcs && client->funcs->unregister) client->funcs->unregister(client); - } else { + else drm_client_release(client); - kfree(client); - } } mutex_unlock(&dev->clientlist_mutex); } @@ -101,7 +102,7 @@ void drm_client_dev_hotplug(struct drm_device *dev) } EXPORT_SYMBOL(drm_client_dev_hotplug); -void drm_client_dev_restore(struct drm_device *dev) +void drm_client_dev_restore(struct drm_device *dev, bool force) { struct drm_client_dev *client; int ret; @@ -114,7 +115,7 @@ void drm_client_dev_restore(struct drm_device *dev) if (!client->funcs || !client->funcs->restore) continue; - ret = client->funcs->restore(client); + ret = client->funcs->restore(client, force); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); if (!ret) /* The first one to return zero gets the privilege to restore */ break; @@ -122,7 +123,7 @@ void drm_client_dev_restore(struct drm_device *dev) mutex_unlock(&dev->clientlist_mutex); } -static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +static int drm_client_suspend(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret = 0; @@ -131,7 +132,7 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_ return 0; if (client->funcs && client->funcs->suspend) - ret = client->funcs->suspend(client, holds_console_lock); + ret = client->funcs->suspend(client); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); client->suspended = true; @@ -139,20 +140,20 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_ return ret; } -void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock) +void drm_client_dev_suspend(struct drm_device *dev) { struct drm_client_dev *client; mutex_lock(&dev->clientlist_mutex); list_for_each_entry(client, &dev->clientlist, list) { if (!client->suspended) - drm_client_suspend(client, holds_console_lock); + drm_client_suspend(client); } mutex_unlock(&dev->clientlist_mutex); } EXPORT_SYMBOL(drm_client_dev_suspend); -static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock) +static int drm_client_resume(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret = 0; @@ -161,7 +162,7 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l return 0; if (client->funcs && client->funcs->resume) - ret = client->funcs->resume(client, holds_console_lock); + ret = client->funcs->resume(client); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); client->suspended = false; @@ -172,14 +173,14 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l return ret; } -void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock) +void drm_client_dev_resume(struct drm_device *dev) { struct drm_client_dev *client; mutex_lock(&dev->clientlist_mutex); list_for_each_entry(client, &dev->clientlist, list) { if (client->suspended) - drm_client_resume(client, holds_console_lock); + drm_client_resume(client); } mutex_unlock(&dev->clientlist_mutex); } diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 9c2c3b0c8c47..fc4caf7da5fc 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -1293,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode) } EXPORT_SYMBOL(drm_client_modeset_dpms); +/** + * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur + * @client: DRM client + * @crtc_index: The ndex 
of the CRTC to wait on + * + * Block the caller until the given CRTC has seen a VBLANK. Do nothing + * if the CRTC is disabled. If there's another DRM master present, fail + * with -EBUSY. + * + * Returns: + * 0 on success, or negative error code otherwise. + */ +int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index) +{ + struct drm_device *dev = client->dev; + struct drm_crtc *crtc; + int ret; + + /* + * Rate-limit update frequency to vblank. If there's a DRM master + * present, it could interfere while we're waiting for the vblank + * event. Don't wait in this case. + */ + if (!drm_master_internal_acquire(dev)) + return -EBUSY; + + crtc = client->modesets[crtc_index].crtc; + + /* + * Only wait for a vblank event if the CRTC is enabled, otherwise + * just don't do anything, not even report an error. + */ + ret = drm_crtc_vblank_get(crtc); + if (!ret) { + drm_crtc_wait_one_vblank(crtc); + drm_crtc_vblank_put(crtc); + } + + drm_master_internal_release(dev); + + return 0; +} +EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank); + #ifdef CONFIG_DRM_KUNIT_TEST #include "tests/drm_client_modeset_test.c" #endif diff --git a/drivers/gpu/drm/drm_client_sysrq.c b/drivers/gpu/drm/drm_client_sysrq.c new file mode 100644 index 000000000000..eea660096f1b --- /dev/null +++ b/drivers/gpu/drm/drm_client_sysrq.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +#include <linux/sysrq.h> + +#include <drm/drm_client_event.h> +#include <drm/drm_device.h> +#include <drm/drm_print.h> + +#include "drm_internal.h" + +#ifdef CONFIG_MAGIC_SYSRQ +static LIST_HEAD(drm_client_sysrq_dev_list); +static DEFINE_MUTEX(drm_client_sysrq_dev_lock); + +/* emergency restore, don't bother with error reporting */ +static void drm_client_sysrq_restore_work_fn(struct work_struct *ignored) +{ + struct drm_device *dev; + + guard(mutex)(&drm_client_sysrq_dev_lock); + + list_for_each_entry(dev, &drm_client_sysrq_dev_list, client_sysrq_list) { + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + continue; + + drm_client_dev_restore(dev, true); + } +} + +static DECLARE_WORK(drm_client_sysrq_restore_work, drm_client_sysrq_restore_work_fn); + +static void drm_client_sysrq_restore_handler(u8 ignored) +{ + schedule_work(&drm_client_sysrq_restore_work); +} + +static const struct sysrq_key_op drm_client_sysrq_restore_op = { + .handler = drm_client_sysrq_restore_handler, + .help_msg = "force-fb(v)", + .action_msg = "Restore framebuffer console", +}; + +void drm_client_sysrq_register(struct drm_device *dev) +{ + guard(mutex)(&drm_client_sysrq_dev_lock); + + if (list_empty(&drm_client_sysrq_dev_list)) + register_sysrq_key('v', &drm_client_sysrq_restore_op); + + list_add(&dev->client_sysrq_list, &drm_client_sysrq_dev_list); +} + +void drm_client_sysrq_unregister(struct drm_device *dev) +{ + guard(mutex)(&drm_client_sysrq_dev_lock); + + /* remove device from global restore list */ + if (!drm_WARN_ON(dev, list_empty(&dev->client_sysrq_list))) + list_del(&dev->client_sysrq_list); + + /* no devices left; unregister key */ + if (list_empty(&drm_client_sysrq_dev_list)) + unregister_sysrq_key('v', &drm_client_sysrq_restore_op); +} +#endif diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index 131c1c9ae92f..c598b99673fc 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -874,3 +874,46 @@ void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_pa fill_palette_8(crtc, i, set_palette); } 
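The new drm_client_modeset_wait_for_vblank() helper above lets a client throttle its updates to the display. A short hedged sketch of that pattern (crtc_index 0 assumed, errors deliberately ignored; my_client_flush_throttled is hypothetical):

static void my_client_flush_throttled(struct drm_client_dev *client,
				      struct drm_client_buffer *buffer)
{
	/*
	 * Returns -EBUSY if another DRM master owns the device; the client
	 * then simply skips throttling and flushes right away.
	 */
	drm_client_modeset_wait_for_vblank(client, 0);

	drm_client_buffer_flush(buffer, NULL);
}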
EXPORT_SYMBOL(drm_crtc_fill_palette_8); + +/** + * drm_color_lut32_check - check validity of extended lookup table + * @lut: property blob containing extended LUT to check + * @tests: bitmask of tests to run + * + * Helper to check whether a userspace-provided extended lookup table is valid and + * satisfies hardware requirements. Drivers pass a bitmask indicating which of + * the tests in &drm_color_lut_tests should be performed. + * + * Returns 0 on success, -EINVAL on failure. + */ +int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests) +{ + const struct drm_color_lut32 *entry; + int i; + + if (!lut || !tests) + return 0; + + entry = lut->data; + for (i = 0; i < drm_color_lut32_size(lut); i++) { + if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) { + if (entry[i].red != entry[i].blue || + entry[i].red != entry[i].green) { + DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n"); + return -EINVAL; + } + } + + if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) { + if (entry[i].red < entry[i - 1].red || + entry[i].green < entry[i - 1].green || + entry[i].blue < entry[i - 1].blue) { + DRM_DEBUG_KMS("LUT entries must never decrease.\n"); + return -EINVAL; + } + } + } + + return 0; +} +EXPORT_SYMBOL(drm_color_lut32_check); diff --git a/drivers/gpu/drm/drm_colorop.c b/drivers/gpu/drm/drm_colorop.c new file mode 100644 index 000000000000..44eb823585d2 --- /dev/null +++ b/drivers/gpu/drm/drm_colorop.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drm_colorop.h> +#include <drm/drm_print.h> +#include <drm/drm_drv.h> +#include <drm/drm_plane.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * When userspace signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE it + * should use the COLOR_PIPELINE plane property and associated colorops + * for any color operation on the &drm_plane. Setting of all old color + * properties, such as COLOR_ENCODING and COLOR_RANGE, will be rejected + * and the values of the properties will be ignored. + * + * Colorops are only advertised and valid for atomic drivers and atomic + * userspace that signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE + * client cap. + * + * A colorop represents a single color operation. Colorops are chained + * via the NEXT property and make up color pipelines. 
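From userspace, the pipeline selection described in this overview boils down to enabling the client cap and writing a colorop object ID into the plane's COLOR_PIPELINE property. The libdrm sketch below assumes the property and colorop IDs were already discovered via drmModeObjectGetProperties(); DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE comes from the UAPI headers added by this series, and set_plane_pipeline() is purely illustrative.

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_plane_pipeline(int fd, uint32_t plane_id,
			      uint32_t color_pipeline_prop_id,
			      uint32_t first_colorop_id)
{
	drmModeAtomicReq *req;
	int ret;

	/* Opt in; without the cap the COLOR_PIPELINE property is not exposed. */
	ret = drmSetClientCap(fd, DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE, 1);
	if (ret)
		return ret;

	req = drmModeAtomicAlloc();
	if (!req)
		return -ENOMEM;

	/* 0 selects bypass; otherwise pass the pipeline's first colorop ID. */
	drmModeAtomicAddProperty(req, plane_id, color_pipeline_prop_id,
				 first_colorop_id);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);

	return ret;
}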
Color pipelines + * are advertised and selected via the COLOR_PIPELINE &drm_plane + * property. + * + * A colorop will be of a certain type, advertised by the read-only TYPE + * property. Each type of colorop will advertise a different set of + * properties and is programmed in a different manner. Types can be + * enumerated 1D curves, 1D LUTs, 3D LUTs, matrices, etc. See the + * &drm_colorop_type documentation for information on each type. + * + * If a colorop advertises the BYPASS property it can be bypassed. + * + * Information about colorop and color pipeline design decisions can be + * found at rfc/color_pipeline.rst, but note that this document will + * grow stale over time. + */ + +static const struct drm_prop_enum_list drm_colorop_type_enum_list[] = { + { DRM_COLOROP_1D_CURVE, "1D Curve" }, + { DRM_COLOROP_1D_LUT, "1D LUT" }, + { DRM_COLOROP_CTM_3X4, "3x4 Matrix"}, + { DRM_COLOROP_MULTIPLIER, "Multiplier"}, + { DRM_COLOROP_3D_LUT, "3D LUT"}, +}; + +static const char * const colorop_curve_1d_type_names[] = { + [DRM_COLOROP_1D_CURVE_SRGB_EOTF] = "sRGB EOTF", + [DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF] = "sRGB Inverse EOTF", + [DRM_COLOROP_1D_CURVE_PQ_125_EOTF] = "PQ 125 EOTF", + [DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF] = "PQ 125 Inverse EOTF", + [DRM_COLOROP_1D_CURVE_BT2020_INV_OETF] = "BT.2020 Inverse OETF", + [DRM_COLOROP_1D_CURVE_BT2020_OETF] = "BT.2020 OETF", + [DRM_COLOROP_1D_CURVE_GAMMA22] = "Gamma 2.2", + [DRM_COLOROP_1D_CURVE_GAMMA22_INV] = "Gamma 2.2 Inverse", +}; + +static const struct drm_prop_enum_list drm_colorop_lut1d_interpolation_list[] = { + { DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, "Linear" }, +}; + + +static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] = { + { DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, "Tetrahedral" }, +}; + +/* Init Helpers */ + +static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, enum drm_colorop_type type, + uint32_t flags) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_property *prop; + int ret = 0; + + ret = drm_mode_object_add(dev, &colorop->base, DRM_MODE_OBJECT_COLOROP); + if (ret) + return ret; + + colorop->base.properties = &colorop->properties; + colorop->dev = dev; + colorop->type = type; + colorop->plane = plane; + colorop->next = NULL; + + list_add_tail(&colorop->head, &config->colorop_list); + colorop->index = config->num_colorop++; + + /* add properties */ + + /* type */ + prop = drm_property_create_enum(dev, + DRM_MODE_PROP_IMMUTABLE, + "TYPE", drm_colorop_type_enum_list, + ARRAY_SIZE(drm_colorop_type_enum_list)); + + if (!prop) + return -ENOMEM; + + colorop->type_property = prop; + + drm_object_attach_property(&colorop->base, + colorop->type_property, + colorop->type); + + if (flags & DRM_COLOROP_FLAG_ALLOW_BYPASS) { + /* bypass */ + prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, + "BYPASS"); + if (!prop) + return -ENOMEM; + + colorop->bypass_property = prop; + drm_object_attach_property(&colorop->base, + colorop->bypass_property, + 1); + } + + /* next */ + prop = drm_property_create_object(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "NEXT", DRM_MODE_OBJECT_COLOROP); + if (!prop) + return -ENOMEM; + colorop->next_property = prop; + drm_object_attach_property(&colorop->base, + colorop->next_property, + 0); + + return ret; +} + +/** + * drm_colorop_cleanup - Cleanup a drm_colorop object in color_pipeline + * + * @colorop: The drm_colorop object to be cleaned + */ +void drm_colorop_cleanup(struct 
drm_colorop *colorop) +{ + struct drm_device *dev = colorop->dev; + struct drm_mode_config *config = &dev->mode_config; + + list_del(&colorop->head); + config->num_colorop--; + + if (colorop->state && colorop->state->data) { + drm_property_blob_put(colorop->state->data); + colorop->state->data = NULL; + } + + kfree(colorop->state); +} +EXPORT_SYMBOL(drm_colorop_cleanup); + +/** + * drm_colorop_pipeline_destroy - Helper for color pipeline destruction + * + * @dev: - The drm_device containing the drm_planes with the color_pipelines + * + * Provides a default color pipeline destroy handler for drm_device. + */ +void drm_colorop_pipeline_destroy(struct drm_device *dev) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_colorop *colorop, *next; + + list_for_each_entry_safe(colorop, next, &config->colorop_list, head) { + drm_colorop_cleanup(colorop); + kfree(colorop); + } +} +EXPORT_SYMBOL(drm_colorop_pipeline_destroy); + +/** + * drm_plane_colorop_curve_1d_init - Initialize a DRM_COLOROP_1D_CURVE + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @supported_tfs: A bitfield of supported drm_plane_colorop_curve_1d_init enum values, + * created using BIT(curve_type) and combined with the OR '|' + * operator. + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. + * @return zero on success, -E value on failure + */ +int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, u64 supported_tfs, uint32_t flags) +{ + struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT]; + int i, len; + + struct drm_property *prop; + int ret; + + if (!supported_tfs) { + drm_err(dev, + "No supported TFs for new 1D curve colorop on [PLANE:%d:%s]\n", + plane->base.id, plane->name); + return -EINVAL; + } + + if ((supported_tfs & -BIT(DRM_COLOROP_1D_CURVE_COUNT)) != 0) { + drm_err(dev, "Unknown TF provided on [PLANE:%d:%s]\n", + plane->base.id, plane->name); + return -EINVAL; + } + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags); + if (ret) + return ret; + + len = 0; + for (i = 0; i < DRM_COLOROP_1D_CURVE_COUNT; i++) { + if ((supported_tfs & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = colorop_curve_1d_type_names[i]; + len++; + } + + if (WARN_ON(len <= 0)) + return -EINVAL; + + /* initialize 1D curve only attribute */ + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ATOMIC, "CURVE_1D_TYPE", + enum_list, len); + + if (!prop) + return -ENOMEM; + + colorop->curve_1d_type_property = prop; + drm_object_attach_property(&colorop->base, colorop->curve_1d_type_property, + enum_list[0].type); + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_curve_1d_init); + +static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_colorop *colorop) +{ + struct drm_property *prop; + + /* data */ + prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, + "DATA", 0); + if (!prop) + return -ENOMEM; + + colorop->data_property = prop; + drm_object_attach_property(&colorop->base, + colorop->data_property, + 0); + + return 0; +} + +/** + * drm_plane_colorop_curve_1d_lut_init - Initialize a DRM_COLOROP_1D_LUT + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @lut_size: LUT size supported by driver + * @interpolation: 1D LUT interpolation type + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* 
defines. + * @return zero on success, -E value on failure + */ +int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t lut_size, + enum drm_colorop_lut1d_interpolation_type interpolation, + uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags); + if (ret) + return ret; + + /* initialize 1D LUT only attribute */ + /* LUT size */ + prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + + colorop->size_property = prop; + drm_object_attach_property(&colorop->base, colorop->size_property, lut_size); + colorop->size = lut_size; + + /* interpolation */ + prop = drm_property_create_enum(dev, 0, "LUT1D_INTERPOLATION", + drm_colorop_lut1d_interpolation_list, + ARRAY_SIZE(drm_colorop_lut1d_interpolation_list)); + if (!prop) + return -ENOMEM; + + colorop->lut1d_interpolation_property = prop; + drm_object_attach_property(&colorop->base, prop, interpolation); + colorop->lut1d_interpolation = interpolation; + + /* data */ + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init); + +int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t flags) +{ + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags); + if (ret) + return ret; + + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init); + +/** + * drm_plane_colorop_mult_init - Initialize a DRM_COLOROP_MULTIPLIER + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 
+ * @return zero on success, -E value on failure + */ +int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags); + if (ret) + return ret; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "MULTIPLIER", 0, U64_MAX); + if (!prop) + return -ENOMEM; + + colorop->multiplier_property = prop; + drm_object_attach_property(&colorop->base, colorop->multiplier_property, 0); + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_mult_init); + +int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, + uint32_t lut_size, + enum drm_colorop_lut3d_interpolation_type interpolation, + uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags); + if (ret) + return ret; + + /* LUT size */ + prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + + colorop->size_property = prop; + drm_object_attach_property(&colorop->base, colorop->size_property, lut_size); + colorop->size = lut_size; + + /* interpolation */ + prop = drm_property_create_enum(dev, 0, "LUT3D_INTERPOLATION", + drm_colorop_lut3d_interpolation_list, + ARRAY_SIZE(drm_colorop_lut3d_interpolation_list)); + if (!prop) + return -ENOMEM; + + colorop->lut3d_interpolation_property = prop; + drm_object_attach_property(&colorop->base, prop, interpolation); + colorop->lut3d_interpolation = interpolation; + + /* data */ + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_3dlut_init); + +static void __drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop, + struct drm_colorop_state *state) +{ + memcpy(state, colorop->state, sizeof(*state)); + + if (state->data) + drm_property_blob_get(state->data); + + state->bypass = true; +} + +struct drm_colorop_state * +drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop) +{ + struct drm_colorop_state *state; + + if (WARN_ON(!colorop->state)) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_colorop_duplicate_state(colorop, state); + + return state; +} + +void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop, + struct drm_colorop_state *state) +{ + kfree(state); +} + +/** + * __drm_colorop_state_reset - resets colorop state to default values + * @colorop_state: atomic colorop state, must not be NULL + * @colorop: colorop object, must not be NULL + * + * Initializes the newly allocated @colorop_state with default + * values. This is useful for drivers that subclass the CRTC state. 
+ */ +static void __drm_colorop_state_reset(struct drm_colorop_state *colorop_state, + struct drm_colorop *colorop) +{ + u64 val; + + colorop_state->colorop = colorop; + colorop_state->bypass = true; + + if (colorop->curve_1d_type_property) { + drm_object_property_get_default_value(&colorop->base, + colorop->curve_1d_type_property, + &val); + colorop_state->curve_1d_type = val; + } +} + +/** + * __drm_colorop_reset - reset state on colorop + * @colorop: drm colorop + * @colorop_state: colorop state to assign + * + * Initializes the newly allocated @colorop_state and assigns it to + * the &drm_crtc->state pointer of @colorop, usually required when + * initializing the drivers or when called from the &drm_colorop_funcs.reset + * hook. + * + * This is useful for drivers that subclass the colorop state. + */ +static void __drm_colorop_reset(struct drm_colorop *colorop, + struct drm_colorop_state *colorop_state) +{ + if (colorop_state) + __drm_colorop_state_reset(colorop_state, colorop); + + colorop->state = colorop_state; +} + +void drm_colorop_reset(struct drm_colorop *colorop) +{ + kfree(colorop->state); + colorop->state = kzalloc(sizeof(*colorop->state), GFP_KERNEL); + + if (colorop->state) + __drm_colorop_reset(colorop, colorop->state); +} + +static const char * const colorop_type_name[] = { + [DRM_COLOROP_1D_CURVE] = "1D Curve", + [DRM_COLOROP_1D_LUT] = "1D LUT", + [DRM_COLOROP_CTM_3X4] = "3x4 Matrix", + [DRM_COLOROP_MULTIPLIER] = "Multiplier", + [DRM_COLOROP_3D_LUT] = "3D LUT", +}; + +static const char * const colorop_lu3d_interpolation_name[] = { + [DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL] = "Tetrahedral", +}; + +static const char * const colorop_lut1d_interpolation_name[] = { + [DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR] = "Linear", +}; + +const char *drm_get_colorop_type_name(enum drm_colorop_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_type_name))) + return "unknown"; + + return colorop_type_name[type]; +} + +const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_curve_1d_type_names))) + return "unknown"; + + return colorop_curve_1d_type_names[type]; +} + +/** + * drm_get_colorop_lut1d_interpolation_name: return a string for interpolation type + * @type: interpolation type to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. + */ +const char *drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_lut1d_interpolation_name))) + return "unknown"; + + return colorop_lut1d_interpolation_name[type]; +} + +/** + * drm_get_colorop_lut3d_interpolation_name - return a string for interpolation type + * @type: interpolation type to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. 
+ */ +const char *drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_lu3d_interpolation_name))) + return "unknown"; + + return colorop_lu3d_interpolation_name[type]; +} + +/** + * drm_colorop_set_next_property - sets the next pointer + * @colorop: drm colorop + * @next: next colorop + * + * Should be used when constructing the color pipeline. + */ +void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next) +{ + drm_object_property_set_value(&colorop->base, + colorop->next_property, + next ? next->base.id : 0); + colorop->next = next; +} +EXPORT_SYMBOL(drm_colorop_set_next_property); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 272d6254ea47..4d6dc9ebfdb5 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -3439,6 +3439,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, * properties reflect the latest status. */ ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + file_priv->plane_color_pipeline, (uint32_t __user *)(unsigned long)(out_resp->props_ptr), (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), &out_resp->count_props); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 46655339003d..a7797d260f1e 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -229,6 +229,25 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc) * Driver's default scaling filter * Nearest Neighbor: * Nearest Neighbor scaling filter + * SHARPNESS_STRENGTH: + * Atomic property for setting the sharpness strength/intensity from userspace. + * + * The value of this property is an integer in the range + * 0 - 255, where: + * + * 0: Sharpness feature is disabled (default value). + * + * 1: Minimum sharpness. + * + * 255: Maximum sharpness. + * + * Users can gradually increase or decrease the sharpness level and + * set the optimum value depending on the content. + * This value is passed to the kernel through the UAPI. + * Setting this property does not require a modeset. + * The sharpness effect takes place post-blending, on the final composed output. + * If the feature is disabled, the content remains unchanged, without any sharpening + * effect; when the feature is applied, it enhances the clarity of the content. 
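+ *
+ * For illustration only (not part of this patch; variable names are
+ * placeholders): userspace would program the property through the
+ * existing libdrm atomic API, e.g.
+ *
+ *	drmModeAtomicAddProperty(req, crtc_id, sharpness_prop_id, 128);
+ *	drmModeAtomicCommit(fd, req, 0, NULL);
+ *
+ * where sharpness_prop_id is the id of the "SHARPNESS_STRENGTH" property
+ * as reported by drmModeObjectGetProperties() for the CRTC.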
*/ __printf(6, 0) @@ -940,6 +959,22 @@ int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc, } EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property); +int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_property *prop = + drm_property_create_range(dev, 0, "SHARPNESS_STRENGTH", 0, 255); + + if (!prop) + return -ENOMEM; + + crtc->sharpness_strength_property = prop; + drm_object_attach_property(&crtc->base, prop, 0); + + return 0; +} +EXPORT_SYMBOL(drm_crtc_create_sharpness_strength_property); + /** * drm_crtc_in_clone_mode - check if the given CRTC state is in clone mode * diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 89706aa8232f..c09409229644 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -163,6 +163,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, void drm_mode_object_unregister(struct drm_device *dev, struct drm_mode_object *object); int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, + bool plane_color_pipeline, uint32_t __user *prop_ptr, uint64_t __user *prop_values, uint32_t *arg_count_props); diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c index b4fd43783c50..58d0bb6d2676 100644 --- a/drivers/gpu/drm/drm_displayid.c +++ b/drivers/gpu/drm/drm_displayid.c @@ -9,6 +9,34 @@ #include "drm_crtc_internal.h" #include "drm_displayid_internal.h" +enum { + QUIRK_IGNORE_CHECKSUM, +}; + +struct displayid_quirk { + const struct drm_edid_ident ident; + u8 quirks; +}; + +static const struct displayid_quirk quirks[] = { + { + .ident = DRM_EDID_IDENT_INIT('C', 'S', 'O', 5142, "MNE007ZA1-5"), + .quirks = BIT(QUIRK_IGNORE_CHECKSUM), + }, +}; + +static u8 get_quirks(const struct drm_edid *drm_edid) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(quirks); i++) { + if (drm_edid_match(drm_edid, &quirks[i].ident)) + return quirks[i].quirks; + } + + return 0; +} + static const struct displayid_header * displayid_get_header(const u8 *displayid, int length, int index) { @@ -23,7 +51,7 @@ displayid_get_header(const u8 *displayid, int length, int index) } static const struct displayid_header * -validate_displayid(const u8 *displayid, int length, int idx) +validate_displayid(const u8 *displayid, int length, int idx, bool ignore_checksum) { int i, dispid_length; u8 csum = 0; @@ -41,33 +69,35 @@ validate_displayid(const u8 *displayid, int length, int idx) for (i = 0; i < dispid_length; i++) csum += displayid[idx + i]; if (csum) { - DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); - return ERR_PTR(-EINVAL); + DRM_NOTE("DisplayID checksum invalid, remainder is %d%s\n", csum, + ignore_checksum ? 
" (ignoring)" : ""); + + if (!ignore_checksum) + return ERR_PTR(-EINVAL); } return base; } -static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, - int *length, int *idx, - int *ext_index) +static const u8 *find_next_displayid_extension(struct displayid_iter *iter) { const struct displayid_header *base; const u8 *displayid; + bool ignore_checksum = iter->quirks & BIT(QUIRK_IGNORE_CHECKSUM); - displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index); + displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index); if (!displayid) return NULL; /* EDID extensions block checksum isn't for us */ - *length = EDID_LENGTH - 1; - *idx = 1; + iter->length = EDID_LENGTH - 1; + iter->idx = 1; - base = validate_displayid(displayid, *length, *idx); + base = validate_displayid(displayid, iter->length, iter->idx, ignore_checksum); if (IS_ERR(base)) return NULL; - *length = *idx + sizeof(*base) + base->bytes; + iter->length = iter->idx + sizeof(*base) + base->bytes; return displayid; } @@ -78,6 +108,7 @@ void displayid_iter_edid_begin(const struct drm_edid *drm_edid, memset(iter, 0, sizeof(*iter)); iter->drm_edid = drm_edid; + iter->quirks = get_quirks(drm_edid); } static const struct displayid_block * @@ -126,10 +157,7 @@ __displayid_iter_next(struct displayid_iter *iter) /* The first section we encounter is the base section */ bool base_section = !iter->section; - iter->section = drm_find_displayid_extension(iter->drm_edid, - &iter->length, - &iter->idx, - &iter->ext_index); + iter->section = find_next_displayid_extension(iter); if (!iter->section) { iter->drm_edid = NULL; return NULL; diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h index 957dd0619f5c..5b1b32f73516 100644 --- a/drivers/gpu/drm/drm_displayid_internal.h +++ b/drivers/gpu/drm/drm_displayid_internal.h @@ -167,6 +167,8 @@ struct displayid_iter { u8 version; u8 primary_use; + + u8 quirks; }; void displayid_iter_edid_begin(const struct drm_edid *drm_edid, diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8e3cb08241c8..2915118436ce 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -733,6 +733,7 @@ static int drm_dev_init(struct drm_device *dev, INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->filelist_internal); INIT_LIST_HEAD(&dev->clientlist); + INIT_LIST_HEAD(&dev->client_sysrq_list); INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->event_lock); @@ -1100,6 +1101,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_unload; } drm_panic_register(dev); + drm_client_sysrq_register(dev); DRM_INFO("Initialized %s %d.%d.%d for %s on minor %d\n", driver->name, driver->major, driver->minor, @@ -1144,6 +1146,7 @@ void drm_dev_unregister(struct drm_device *dev) { dev->registered = false; + drm_client_sysrq_unregister(dev); drm_panic_unregister(dev); drm_client_dev_unregister(dev); diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index 70032bba1c97..e2b62e5fb891 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c @@ -25,8 +25,11 @@ #include <drm/drm_device.h> #include <drm/drm_drv.h> +#include <drm/drm_dumb_buffers.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem.h> #include <drm/drm_mode.h> +#include <drm/drm_print.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -57,6 +60,134 @@ * a hardware-specific ioctl to allocate suitable buffer objects. 
*/ +static int drm_mode_align_dumb(struct drm_mode_create_dumb *args, + unsigned long hw_pitch_align, + unsigned long hw_size_align) +{ + u32 pitch = args->pitch; + u32 size; + + if (!pitch) + return -EINVAL; + + if (hw_pitch_align) + pitch = roundup(pitch, hw_pitch_align); + + if (!hw_size_align) + hw_size_align = PAGE_SIZE; + else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE)) + return -EINVAL; /* TODO: handle this if necessary */ + + if (check_mul_overflow(args->height, pitch, &size)) + return -EINVAL; + size = ALIGN(size, hw_size_align); + if (!size) + return -EINVAL; + + args->pitch = pitch; + args->size = size; + + return 0; +} + +/** + * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers + * @dev: DRM device + * @args: Parameters for the dumb buffer + * @hw_pitch_align: Hardware scanline alignment in bytes + * @hw_size_align: Hardware buffer-size alignment in bytes + * + * The helper drm_mode_size_dumb() calculates the size of the buffer + * allocation and the scanline size for a dumb buffer. Callers have to + * set the buffer's width, height and color mode in the argument @args. + * The helper validates the correctness of the input and tests for + * possible overflows. If successful, it returns the dumb buffer's + * required scanline pitch and size in @args. + * + * The parameter @hw_pitch_align allows the driver to specify an + * alignment for the scanline pitch, if the hardware requires any. The + * calculated pitch will be a multiple of the alignment. The parameter + * @hw_size_align allows the driver to specify an alignment for buffer sizes. The + * provided alignment should represent the requirements of the graphics + * hardware. drm_mode_size_dumb() handles GEM-related constraints + * automatically across all drivers and hardware. For example, the + * returned buffer size is always a multiple of PAGE_SIZE, which is + * required by mmap(). + * + * Returns: + * Zero on success, or a negative error code otherwise. + */ +int drm_mode_size_dumb(struct drm_device *dev, + struct drm_mode_create_dumb *args, + unsigned long hw_pitch_align, + unsigned long hw_size_align) +{ + u64 pitch = 0; + u32 fourcc; + + /* + * The scanline pitch depends on the buffer width and the color + * format. The latter is specified as a color-mode constant for + * which we first have to find the corresponding color format. + * + * Different color formats can have the same color-mode constant. + * For example XRGB8888 and BGRX8888 both have a color mode of 32. + * It is possible to use different formats for dumb-buffer allocation + * and rendering as long as all involved formats share the same + * color-mode constant. + */ + fourcc = drm_driver_color_mode_format(dev, args->bpp); + if (fourcc != DRM_FORMAT_INVALID) { + const struct drm_format_info *info = drm_format_info(fourcc); + + if (!info) + return -EINVAL; + pitch = drm_format_info_min_pitch(info, 0, args->width); + } else if (args->bpp) { + /* + * Some userspace throws in arbitrary values for bpp and + * relies on the kernel to figure it out. In this case we + * fall back to the old method of using bpp directly. The + * over-commitment of memory from the rounding is acceptable + * for compatibility with legacy userspace. We have a number + * of deprecated legacy values that are explicitly supported. + */ + switch (args->bpp) { + default: + drm_warn_once(dev, + "Unknown color mode %u; guessing buffer size.\n", + args->bpp); + fallthrough; + /* + * These constants represent various YUV formats supported by + * drm_gem_afbc_get_bpp(). 
+ */ + case 12: // DRM_FORMAT_YUV420_8BIT + case 15: // DRM_FORMAT_YUV420_10BIT + case 30: // DRM_FORMAT_VUY101010 + fallthrough; + /* + * Used by Mesa and Gstreamer to allocate NV formats and others + * as RGB buffers. Technically, XRGB16161616F formats are RGB, + * but the dumb buffers are not supposed to be used for anything + * beyond 32 bits per pixels. + */ + case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010 + case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F + pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8); + break; + } + } + + if (!pitch || pitch > U32_MAX) + return -EINVAL; + + args->pitch = pitch; + + return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align); +} +EXPORT_SYMBOL(drm_mode_size_dumb); + int drm_mode_create_dumb(struct drm_device *dev, struct drm_mode_create_dumb *args, struct drm_file *file_priv) @@ -99,7 +230,30 @@ int drm_mode_create_dumb(struct drm_device *dev, int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - return drm_mode_create_dumb(dev, data, file_priv); + struct drm_mode_create_dumb *args = data; + int err; + + err = drm_mode_create_dumb(dev, args, file_priv); + if (err) { + args->handle = 0; + args->pitch = 0; + args->size = 0; + } + return err; +} + +static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args, + struct drm_file *file_priv) +{ + if (!dev->driver->dumb_create) + return -ENOSYS; + + if (dev->driver->dumb_map_offset) + return dev->driver->dumb_map_offset(file_priv, dev, args->handle, + &args->offset); + else + return drm_gem_dumb_map_offset(file_priv, dev, args->handle, + &args->offset); } /** @@ -120,17 +274,12 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_map_dumb *args = data; + int err; - if (!dev->driver->dumb_create) - return -ENOSYS; - - if (dev->driver->dumb_map_offset) - return dev->driver->dumb_map_offset(file_priv, dev, - args->handle, - &args->offset); - else - return drm_gem_dumb_map_offset(file_priv, dev, args->handle, - &args->offset); + err = drm_mode_mmap_dumb(dev, args, file_priv); + if (err) + args->offset = 0; + return err; } int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle, diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index e2e85345aa9a..26bb7710a462 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -250,6 +250,9 @@ static const struct edid_quirk { EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)), EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)), + /* LQ116M1JW10 displays noise when 8 bpc, but display fine as 6 bpc */ + EDID_QUIRK('S', 'H', 'P', 0x154c, BIT(EDID_QUIRK_FORCE_6BPC)), + /* * @drm_edid_internal_quirk entries end here, following with the * @drm_edid_quirk entries. 
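Stepping back to the drm_mode_size_dumb() helper added in drm_dumb_buffers.c above: it centralizes the pitch/size validation that the drm_gem_dma_helper.c and drm_gem_shmem_helper.c hunks further down now rely on. The following is a minimal sketch (not part of this series) of how a driver-side &drm_driver.dumb_create implementation would typically use it; my_dumb_create() and my_gem_create_with_handle() are illustrative placeholders for the driver's own callback and buffer allocation:

	static int my_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
				  struct drm_mode_create_dumb *args)
	{
		int ret;

		/* Compute args->pitch and args->size; 0/0 = no special HW alignment. */
		ret = drm_mode_size_dumb(dev, args, 0, 0);
		if (ret)
			return ret;

		/* Allocate the backing object and return its handle in args->handle. */
		return my_gem_create_with_handle(file_priv, dev, args->size, &args->handle);
	}
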
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 11a5b60cb9ce..4a7f72044ab8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -31,9 +31,6 @@ #include <linux/console.h> #include <linux/export.h> -#include <linux/pci.h> -#include <linux/sysrq.h> -#include <linux/vga_switcheroo.h> #include <drm/drm_atomic.h> #include <drm/drm_drv.h> @@ -255,6 +252,7 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, /** * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration * @fb_helper: driver-allocated fbdev helper, can be NULL + * @force: ignore present DRM master * * This helper should be called from fbdev emulation's &drm_client_funcs.restore * callback. It ensures that the user isn't greeted with a black screen when the @@ -263,48 +261,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, * Returns: * 0 on success, or a negative errno code otherwise. */ -int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, bool force) { - return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false); + return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); -#ifdef CONFIG_MAGIC_SYSRQ -/* emergency restore, don't bother with error reporting */ -static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) -{ - struct drm_fb_helper *helper; - - mutex_lock(&kernel_fb_helper_lock); - list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { - struct drm_device *dev = helper->dev; - - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) - continue; - - mutex_lock(&helper->lock); - drm_client_modeset_commit_locked(&helper->client); - mutex_unlock(&helper->lock); - } - mutex_unlock(&kernel_fb_helper_lock); -} - -static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); - -static void drm_fb_helper_sysrq(u8 dummy1) -{ - schedule_work(&drm_fb_helper_restore_work); -} - -static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { - .handler = drm_fb_helper_sysrq, - .help_msg = "force-fb(v)", - .action_msg = "Restore framebuffer console", -}; -#else -static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; -#endif - static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) { struct drm_fb_helper *fb_helper = info->par; @@ -368,6 +330,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper) unsigned long flags; int ret; + mutex_lock(&helper->lock); + drm_client_modeset_wait_for_vblank(&helper->client, 0); + mutex_unlock(&helper->lock); + if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty)) return; @@ -491,20 +457,7 @@ int drm_fb_helper_init(struct drm_device *dev, } EXPORT_SYMBOL(drm_fb_helper_init); -/** - * drm_fb_helper_alloc_info - allocate fb_info and some of its members - * @fb_helper: driver-allocated fbdev helper - * - * A helper to alloc fb_info and the member cmap. Called by the driver - * within the struct &drm_driver.fbdev_probe callback function. Drivers do - * not need to release the allocated fb_info structure themselves, this is - * automatically done when calling drm_fb_helper_fini(). 
- * - * RETURNS: - * fb_info pointer if things went okay, pointer containing error code - * otherwise - */ -struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) +static struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) { struct device *dev = fb_helper->dev->dev; struct fb_info *info; @@ -531,17 +484,8 @@ err_release: framebuffer_release(info); return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_fb_helper_alloc_info); -/** - * drm_fb_helper_release_info - release fb_info and its members - * @fb_helper: driver-allocated fbdev helper - * - * A helper to release fb_info and the member cmap. Drivers do not - * need to release the allocated fb_info structure themselves, this is - * automatically done when calling drm_fb_helper_fini(). - */ -void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) +static void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) { struct fb_info *info = fb_helper->info; @@ -554,7 +498,6 @@ void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } -EXPORT_SYMBOL(drm_fb_helper_release_info); /** * drm_fb_helper_unregister_info - unregister fb_info framebuffer device @@ -566,11 +509,6 @@ EXPORT_SYMBOL(drm_fb_helper_release_info); */ void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) { - struct fb_info *info = fb_helper->info; - struct device *dev = info->device; - - if (dev_is_pci(dev)) - vga_switcheroo_client_fb_set(to_pci_dev(dev), NULL); unregister_framebuffer(fb_helper->info); } EXPORT_SYMBOL(drm_fb_helper_unregister_info); @@ -597,11 +535,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) drm_fb_helper_release_info(fb_helper); mutex_lock(&kernel_fb_helper_lock); - if (!list_empty(&fb_helper->kernel_fb_list)) { + if (!list_empty(&fb_helper->kernel_fb_list)) list_del(&fb_helper->kernel_fb_list); - if (list_empty(&kernel_fb_helper_list)) - unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); - } mutex_unlock(&kernel_fb_helper_lock); if (!fb_helper->client.funcs) @@ -1068,15 +1003,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - struct drm_crtc *crtc; int ret = 0; - mutex_lock(&fb_helper->lock); - if (!drm_master_internal_acquire(dev)) { - ret = -EBUSY; - goto unlock; - } + guard(mutex)(&fb_helper->lock); switch (cmd) { case FBIO_WAITFORVSYNC: @@ -1096,28 +1025,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, * make. If we're not smart enough here, one should * just consider switch the userspace to KMS. */ - crtc = fb_helper->client.modesets[0].crtc; - - /* - * Only wait for a vblank event if the CRTC is - * enabled, otherwise just don't do anythintg, - * not even report an error. - */ - ret = drm_crtc_vblank_get(crtc); - if (!ret) { - drm_crtc_wait_one_vblank(crtc); - drm_crtc_vblank_put(crtc); - } - - ret = 0; + ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0); break; default: ret = -ENOTTY; } - drm_master_internal_release(dev); -unlock: - mutex_unlock(&fb_helper->lock); return ret; } EXPORT_SYMBOL(drm_fb_helper_ioctl); @@ -1346,9 +1259,9 @@ int drm_fb_helper_set_par(struct fb_info *info) * the KDSET IOCTL with KD_TEXT, and only after that drops the master * status when exiting. 
* - * In the past this was caught by drm_fb_helper_lastclose(), but on - * modern systems where logind always keeps a drm fd open to orchestrate - * the vt switching, this doesn't work. + * In the past this was caught by drm_fb_helper_restore_fbdev_mode_unlocked(), + * but on modern systems where logind always keeps a drm fd open to + * orchestrate the vt switching, this doesn't work. * * To not break the userspace ABI we have this special case here, which * is only used for the above case. Everything else uses the normal @@ -1632,7 +1545,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; struct drm_fb_helper_surface_size sizes; - struct fb_info *info; int ret; if (drm_WARN_ON(dev, !dev->driver->fbdev_probe)) @@ -1653,12 +1565,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) strcpy(fb_helper->fb->comm, "[fbcon]"); - info = fb_helper->info; - - /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */ - if (dev_is_pci(info->device)) - vga_switcheroo_client_fb_set(to_pci_dev(info->device), info); - return 0; } @@ -1827,6 +1733,11 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) height = dev->mode_config.max_height; drm_client_modeset_probe(&fb_helper->client, width, height); + + info = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(info)) + return PTR_ERR(info); + ret = drm_fb_helper_single_fb_probe(fb_helper); if (ret < 0) { if (ret == -EAGAIN) { @@ -1835,13 +1746,12 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) } mutex_unlock(&fb_helper->lock); - return ret; + goto err_drm_fb_helper_release_info; } drm_setup_crtcs_fb(fb_helper); fb_helper->deferred_setup = false; - info = fb_helper->info; info->var.pixclock = 0; /* Need to drop locks to avoid recursive deadlock in @@ -1857,13 +1767,14 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) info->node, info->fix.id); mutex_lock(&kernel_fb_helper_lock); - if (list_empty(&kernel_fb_helper_list)) - register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); - list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); mutex_unlock(&kernel_fb_helper_lock); return 0; + +err_drm_fb_helper_release_info: + drm_fb_helper_release_info(fb_helper); + return ret; } /** @@ -1973,16 +1884,3 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) return 0; } EXPORT_SYMBOL(drm_fb_helper_hotplug_event); - -/** - * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation - * @dev: DRM device - * - * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked() - * instead. 
- */ -void drm_fb_helper_lastclose(struct drm_device *dev) -{ - drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper); -} -EXPORT_SYMBOL(drm_fb_helper_lastclose); diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 8bd626ef16c7..9412d9fdd74b 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -10,6 +10,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> /* * struct fb_ops @@ -55,10 +56,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info) drm_fb_helper_fini(fb_helper); drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_dma_fb_ops = { @@ -90,10 +89,8 @@ static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info) vfree(shadow); drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = { @@ -272,9 +269,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; struct drm_framebuffer *fb; - struct fb_info *info; u32 format; struct iosys_map map; int ret; @@ -285,7 +282,7 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); @@ -304,12 +301,6 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->buffer = buffer; fb_helper->fb = fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_client_buffer_vunmap; - } - drm_fb_helper_fill_info(info, fb_helper, sizes); if (fb->funcs->dirty) @@ -317,18 +308,16 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, else ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes); if (ret) - goto err_drm_fb_helper_release_info; + goto err_drm_client_buffer_vunmap; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_drm_client_buffer_vunmap: fb_helper->fb = NULL; fb_helper->buffer = NULL; drm_client_buffer_vunmap(buffer); err_drm_client_buffer_delete: - drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c index 1e827bf8b815..458c899b5d4f 100644 --- a/drivers/gpu/drm/drm_fbdev_shmem.c +++ b/drivers/gpu/drm/drm_fbdev_shmem.c @@ -9,6 +9,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_print.h> /* * struct fb_ops @@ -63,10 +64,8 @@ static void drm_fbdev_shmem_fb_destroy(struct fb_info *info) drm_fb_helper_fini(fb_helper); 
drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_shmem_fb_ops = { @@ -136,10 +135,10 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; struct drm_gem_shmem_object *shmem; struct drm_framebuffer *fb; - struct fb_info *info; u32 format; struct iosys_map map; int ret; @@ -149,7 +148,7 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, sizes->surface_bpp); format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); @@ -169,12 +168,6 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->buffer = buffer; fb_helper->fb = fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_client_buffer_vunmap; - } - drm_fb_helper_fill_info(info, fb_helper, sizes); info->fbops = &drm_fbdev_shmem_fb_ops; @@ -195,18 +188,16 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, info->fbdefio = &fb_helper->fbdefio; ret = fb_deferred_io_init(info); if (ret) - goto err_drm_fb_helper_release_info; + goto err_drm_client_buffer_vunmap; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_drm_client_buffer_vunmap: fb_helper->fb = NULL; fb_helper->buffer = NULL; drm_client_buffer_vunmap(buffer); err_drm_client_buffer_delete: - drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c index 85feb55bba11..160bc35d8738 100644 --- a/drivers/gpu/drm/drm_fbdev_ttm.c +++ b/drivers/gpu/drm/drm_fbdev_ttm.c @@ -50,11 +50,9 @@ static void drm_fbdev_ttm_fb_destroy(struct fb_info *info) fb_deferred_io_cleanup(info); drm_fb_helper_fini(fb_helper); vfree(shadow); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_ttm_fb_ops = { @@ -176,8 +174,8 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; - struct fb_info *info; size_t screen_size; void *screen_buffer; u32 format; @@ -189,7 +187,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); @@ -202,13 +200,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, screen_buffer = vzalloc(screen_size); if 
(!screen_buffer) { ret = -ENOMEM; - goto err_drm_client_framebuffer_delete; - } - - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_vfree; + goto err_drm_client_buffer_delete; } drm_fb_helper_fill_info(info, fb_helper, sizes); @@ -227,18 +219,16 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, info->fbdefio = &fb_helper->fbdefio; ret = fb_deferred_io_init(info); if (ret) - goto err_drm_fb_helper_release_info; + goto err_vfree; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_vfree: vfree(screen_buffer); -err_drm_client_framebuffer_delete: +err_drm_client_buffer_delete: fb_helper->fb = NULL; fb_helper->buffer = NULL; - drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index eebd1a05ee97..be5e617ceb9f 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -405,7 +405,7 @@ EXPORT_SYMBOL(drm_open); static void drm_lastclose(struct drm_device *dev) { - drm_client_dev_restore(dev); + drm_client_dev_restore(dev, false); if (dev_is_pci(dev->dev)) vga_switcheroo_process_delayed_switch(); diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 006836554cc2..6cddf05c493b 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -1165,97 +1165,6 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_ } EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444); -/** - * drm_fb_blit - Copy parts of a framebuffer to display memory - * @dst: Array of display-memory addresses to copy to - * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines - * within @dst; can be NULL if scanlines are stored next to each other. - * @dst_format: FOURCC code of the display's color format - * @src: The framebuffer memory to copy from - * @fb: The framebuffer to copy from - * @clip: Clip rectangle area to copy - * @state: Transform and conversion state - * - * This function copies parts of a framebuffer to display memory. If the - * formats of the display and the framebuffer mismatch, the blit function - * will attempt to convert between them during the process. The parameters @dst, - * @dst_pitch and @src refer to arrays. Each array must have at least as many - * entries as there are planes in @dst_format's format. Each entry stores the - * value for the format's respective color plane at the same index. - * - * This function does not apply clipping on @dst (i.e. the destination is at the - * top-left corner). - * - * Returns: - * 0 on success, or - * -EINVAL if the color-format conversion failed, or - * a negative error code otherwise. 
- */ -int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, - const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, struct drm_format_conv_state *state) -{ - uint32_t fb_format = fb->format->format; - - if (fb_format == dst_format) { - drm_fb_memcpy(dst, dst_pitch, src, fb, clip); - return 0; - } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (fb_format == DRM_FORMAT_XRGB8888) { - if (dst_format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XRGB1555) { - drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB1555) { - drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGBA5551) { - drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_BGR888) { - drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB8888) { - drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XBGR8888) { - drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ABGR8888) { - drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XRGB2101010) { - drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB2101010) { - drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_BGRX8888) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGB332) { - drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state); - return 0; - } - } - - drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n", - &fb_format, &dst_format); - - return -EINVAL; -} -EXPORT_SYMBOL(drm_fb_blit); - static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels) { u8 *dbuf8 = dbuf; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index adbb73f00d68..18e753ade001 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -1048,7 +1048,7 @@ retry: plane_state->crtc->base.id, plane_state->crtc->name, fb->base.id); - crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); ret = drm_atomic_add_affected_connectors(state, plane_state->crtc); if (ret) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index f884d155a832..efc79bbf3c73 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -101,10 +101,8 @@ drm_gem_init(struct drm_device *dev) vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), GFP_KERNEL); - if (!vma_offset_manager) { - DRM_ERROR("out of 
memory\n"); + if (!vma_offset_manager) return -ENOMEM; - } dev->vma_offset_manager = vma_offset_manager; drm_vma_offset_manager_init(vma_offset_manager, @@ -785,9 +783,9 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count, int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out) { - int ret; - u32 *handles; struct drm_gem_object **objs; + u32 *handles; + int ret; if (!count) return 0; @@ -799,20 +797,11 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, *objs_out = objs; - handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); - if (!handles) { - ret = -ENOMEM; - goto out; - } - - if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { - ret = -EFAULT; - DRM_DEBUG("Failed to copy in GEM handles\n"); - goto out; - } + handles = vmemdup_array_user(bo_handles, count, sizeof(u32)); + if (IS_ERR(handles)) + return PTR_ERR(handles); ret = objects_lookup(filp, handles, count, objs); -out: kvfree(handles); return ret; @@ -855,12 +844,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup); long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout) { - long ret; + struct drm_device *dev = filep->minor->dev; struct drm_gem_object *obj; + long ret; obj = drm_gem_object_lookup(filep, handle); if (!obj) { - DRM_DEBUG("Failed to look up GEM BO %d\n", handle); + drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index 6fb55601252f..569d41a65a0b 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -338,8 +338,6 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane) } shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL); - if (!shadow_plane_state) - return; __drm_gem_reset_shadow_plane(plane, shadow_plane_state); } EXPORT_SYMBOL(drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index 4f0320df858f..12d8307997a0 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -20,7 +20,9 @@ #include <drm/drm.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_vma_manager.h> /** @@ -304,9 +306,11 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct drm_gem_dma_object *dma_obj; + int ret; - args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); - args->size = args->pitch * args->height; + ret = drm_mode_size_dumb(drm, args, SZ_8, 0); + if (ret) + return ret; dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size, &args->handle); @@ -582,7 +586,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev, ret = dma_buf_vmap_unlocked(attach->dmabuf, &map); if (ret) { - DRM_ERROR("Failed to vmap PRIME buffer\n"); + drm_err(dev, "Failed to vmap PRIME buffer\n"); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 4bc89d33df59..9fd4eb02a20f 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -16,6 +16,7 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #include 
"drm_internal.h" diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 5d1349c34afd..dc94a27710e5 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -18,6 +18,7 @@ #include <drm/drm.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_prime.h> #include <drm/drm_print.h> @@ -48,28 +49,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { .vm_ops = &drm_gem_shmem_vm_ops, }; -static struct drm_gem_shmem_object * -__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, - struct vfsmount *gemfs) +static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, + size_t size, bool private, struct vfsmount *gemfs) { - struct drm_gem_shmem_object *shmem; - struct drm_gem_object *obj; + struct drm_gem_object *obj = &shmem->base; int ret = 0; - size = PAGE_ALIGN(size); - - if (dev->driver->gem_create_object) { - obj = dev->driver->gem_create_object(dev, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - shmem = to_drm_gem_shmem_obj(obj); - } else { - shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); - if (!shmem) - return ERR_PTR(-ENOMEM); - obj = &shmem->base; - } - if (!obj->funcs) obj->funcs = &drm_gem_shmem_funcs; @@ -81,7 +66,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, } if (ret) { drm_gem_private_object_fini(obj); - goto err_free; + return ret; } ret = drm_gem_create_mmap_offset(obj); @@ -102,14 +87,55 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, __GFP_RETRY_MAYFAIL | __GFP_NOWARN); } - return shmem; - + return 0; err_release: drm_gem_object_release(obj); -err_free: - kfree(obj); + return ret; +} - return ERR_PTR(ret); +/** + * drm_gem_shmem_init - Initialize an allocated object. + * @dev: DRM device + * @obj: The allocated shmem GEM object. + * + * Returns: + * 0 on success, or a negative error code on failure. + */ +int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size) +{ + return __drm_gem_shmem_init(dev, shmem, size, false, NULL); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_init); + +static struct drm_gem_shmem_object * +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, + struct vfsmount *gemfs) +{ + struct drm_gem_shmem_object *shmem; + struct drm_gem_object *obj; + int ret = 0; + + size = PAGE_ALIGN(size); + + if (dev->driver->gem_create_object) { + obj = dev->driver->gem_create_object(dev, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + shmem = to_drm_gem_shmem_obj(obj); + } else { + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!shmem) + return ERR_PTR(-ENOMEM); + obj = &shmem->base; + } + + ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs); + if (ret) { + kfree(obj); + return ERR_PTR(ret); + } + + return shmem; } /** * drm_gem_shmem_create - Allocate an object with the given size @@ -150,13 +176,13 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt); /** - * drm_gem_shmem_free - Free resources associated with a shmem GEM object - * @shmem: shmem GEM object to free + * drm_gem_shmem_release - Release resources associated with a shmem GEM object. + * @shmem: shmem GEM object * - * This function cleans up the GEM object state and frees the memory used to - * store the object itself. 
+ * This function cleans up the GEM object state, but does not free the memory used to store the + * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings. */ -void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; @@ -183,6 +209,19 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } drm_gem_object_release(obj); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_release); + +/** + * drm_gem_shmem_free - Free resources associated with a shmem GEM object + * @shmem: shmem GEM object to free + * + * This function cleans up the GEM object state and frees the memory used to + * store the object itself. + */ +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) +{ + drm_gem_shmem_release(shmem); kfree(shmem); } EXPORT_SYMBOL_GPL(drm_gem_shmem_free); @@ -518,18 +557,11 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked); int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + int ret; - if (!args->pitch || !args->size) { - args->pitch = min_pitch; - args->size = PAGE_ALIGN(args->pitch * args->height); - } else { - /* ensure sane minimum values */ - if (args->pitch < min_pitch) - args->pitch = min_pitch; - if (args->size < args->pitch * args->height) - args->size = PAGE_ALIGN(args->pitch * args->height); - } + ret = drm_mode_size_dumb(dev, args, SZ_8, 0); + if (ret) + return ret; return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); } diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 257cca4cb97a..08ff0fadd0b2 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -4,6 +4,7 @@ #include <linux/module.h> #include <drm/drm_gem_ttm_helper.h> +#include <drm/drm_print.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_tt.h> diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index b04cde4a60e7..5e5b70518dbe 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -17,6 +17,7 @@ #include <drm/drm_mode.h> #include <drm/drm_plane.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/ttm/ttm_range_manager.h> #include <drm/ttm/ttm_tt.h> @@ -107,7 +108,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) { - /* We got here via ttm_bo_put(), which means that the + /* We got here via ttm_bo_fini(), which means that the * TTM buffer object in 'bo' has already been cleaned * up; only release the GEM object. */ @@ -234,11 +235,11 @@ EXPORT_SYMBOL(drm_gem_vram_create); * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object * @gbo: the GEM VRAM object * - * See ttm_bo_put() for more information. + * See ttm_bo_fini() for more information. 
*/ void drm_gem_vram_put(struct drm_gem_vram_object *gbo) { - ttm_bo_put(&gbo->bo); + ttm_bo_fini(&gbo->bo); } EXPORT_SYMBOL(drm_gem_vram_put); @@ -859,7 +860,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev, dev->anon_inode->i_mapping, dev->vma_offset_manager, - false, true); + TTM_ALLOCATION_POOL_USE_DMA32); if (ret) return ret; @@ -967,7 +968,7 @@ drm_vram_helper_mode_valid_internal(struct drm_device *dev, max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT; - fbsize = mode->hdisplay * mode->vdisplay * max_bpp; + fbsize = (u32)mode->hdisplay * mode->vdisplay * max_bpp; fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE); if (fbpages > max_fbpages) diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index cb906765897e..73e550c8ff8c 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1363,7 +1363,8 @@ map_pages: order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages); if (is_device_private_page(page) || is_device_coherent_page(page)) { - if (zdd != page->zone_device_data && i > 0) { + if (!ctx->allow_mixed && + zdd != page->zone_device_data && i > 0) { err = -EOPNOTSUPP; goto err_unmap; } @@ -1399,7 +1400,8 @@ map_pages: } else { dma_addr_t addr; - if (is_zone_device_page(page) || pagemap) { + if (is_zone_device_page(page) || + (pagemap && !ctx->allow_mixed)) { err = -EOPNOTSUPP; goto err_unmap; } diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index af63f4d00315..8a06d296561d 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -26,6 +26,7 @@ */ #include <drm/drm_gpuvm.h> +#include <drm/drm_print.h> #include <linux/export.h> #include <linux/interval_tree_generic.h> @@ -877,6 +878,31 @@ __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock, } /** + * drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup + * @vm_bo: the &drm_gpuvm_bo + * + * When a vm_bo is scheduled for cleanup using the bo_defer list, it is not + * immediately removed from the evict and extobj lists. Therefore, anyone + * iterating these lists should skip entries that are being destroyed. + * + * Checking the refcount without incrementing it is okay as long as the lock + * protecting the evict/extobj list is held for as long as you are using the + * vm_bo, because even if the refcount hits zero while you are using it, freeing + * the vm_bo requires taking the list's lock. + * + * Zombie entries can be observed on the evict and extobj lists regardless of + * whether DRM_GPUVM_RESV_PROTECTED is used, but they remain on the lists for a + * longer time when the resv lock is used because we can't take the resv lock + * during run_job() in immediate mode, meaning that they need to remain on the + * lists until drm_gpuvm_bo_deferred_cleanup() is called. + */ +static bool +drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo) +{ + return !kref_read(&vm_bo->kref); +} + +/** * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list * @__vm_bo: the &drm_gpuvm_bo * @__list_name: the name of the list to insert into @@ -1081,6 +1107,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, INIT_LIST_HEAD(&gpuvm->evict.list); spin_lock_init(&gpuvm->evict.lock); + init_llist_head(&gpuvm->bo_defer); + kref_init(&gpuvm->kref); gpuvm->name = name ? 
name : "unknown"; @@ -1122,6 +1150,8 @@ drm_gpuvm_fini(struct drm_gpuvm *gpuvm) "Extobj list should be empty.\n"); drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list), "Evict list should be empty.\n"); + drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer), + "VM BO cleanup list should be empty.\n"); drm_gem_object_put(gpuvm->r_obj); } @@ -1217,6 +1247,9 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, drm_gpuvm_resv_assert_held(gpuvm); list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) { + if (drm_gpuvm_bo_is_zombie(vm_bo)) + continue; + ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); if (ret) break; @@ -1460,6 +1493,9 @@ drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec) list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list, list.entry.evict) { + if (drm_gpuvm_bo_is_zombie(vm_bo)) + continue; + ret = ops->vm_bo_validate(vm_bo, exec); if (ret) break; @@ -1560,6 +1596,7 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm, INIT_LIST_HEAD(&vm_bo->list.entry.extobj); INIT_LIST_HEAD(&vm_bo->list.entry.evict); + init_llist_node(&vm_bo->list.entry.bo_defer); return vm_bo; } @@ -1621,6 +1658,126 @@ drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo) } EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put); +/* + * drm_gpuvm_bo_into_zombie() - called when the vm_bo becomes a zombie due to + * deferred cleanup + * + * If deferred cleanup is used, then this must be called right after the vm_bo + * refcount drops to zero. Must be called with GEM mutex held. After releasing + * the GEM mutex, drm_gpuvm_bo_defer_zombie_cleanup() must be called. + */ +static void +drm_gpuvm_bo_into_zombie(struct kref *kref) +{ + struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo, + kref); + + if (!drm_gpuvm_resv_protected(vm_bo->vm)) { + drm_gpuvm_bo_list_del(vm_bo, extobj, true); + drm_gpuvm_bo_list_del(vm_bo, evict, true); + } + + list_del(&vm_bo->list.entry.gem); +} + +/* + * drm_gpuvm_bo_defer_zombie_cleanup() - adds a new zombie vm_bo to the + * bo_defer list + * + * Called after drm_gpuvm_bo_into_zombie(). GEM mutex must not be held. + * + * It's important that the GEM stays alive for the duration in which we hold + * the mutex, but the instant we add the vm_bo to bo_defer, another thread + * might call drm_gpuvm_bo_deferred_cleanup() and put the GEM. Therefore, to + * avoid kfreeing a mutex we are holding, the GEM mutex must be released + * *before* calling this function. + */ +static void +drm_gpuvm_bo_defer_zombie_cleanup(struct drm_gpuvm_bo *vm_bo) +{ + llist_add(&vm_bo->list.entry.bo_defer, &vm_bo->vm->bo_defer); +} + +static void +drm_gpuvm_bo_defer_free(struct kref *kref) +{ + struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo, + kref); + + drm_gpuvm_bo_into_zombie(kref); + mutex_unlock(&vm_bo->obj->gpuva.lock); + drm_gpuvm_bo_defer_zombie_cleanup(vm_bo); +} + +/** + * drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with + * deferred cleanup + * @vm_bo: the &drm_gpuvm_bo to release the reference of + * + * This releases a reference to @vm_bo. + * + * This might take and release the GEMs GPUVA lock. You should call + * drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process. + * + * Returns: true if vm_bo is being destroyed, false otherwise. 
+ */ +bool +drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo) +{ + if (!vm_bo) + return false; + + drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm)); + + return !!kref_put_mutex(&vm_bo->kref, + drm_gpuvm_bo_defer_free, + &vm_bo->obj->gpuva.lock); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred); + +/** + * drm_gpuvm_bo_deferred_cleanup() - clean up BOs in the deferred list + * deferred cleanup + * @gpuvm: the VM to clean up + * + * Cleans up &drm_gpuvm_bo instances in the deferred cleanup list. + */ +void +drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gpuvm_bo *vm_bo; + struct drm_gem_object *obj; + struct llist_node *bo_defer; + + bo_defer = llist_del_all(&gpuvm->bo_defer); + if (!bo_defer) + return; + + if (drm_gpuvm_resv_protected(gpuvm)) { + dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL); + llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) { + drm_gpuvm_bo_list_del(vm_bo, extobj, false); + drm_gpuvm_bo_list_del(vm_bo, evict, false); + } + dma_resv_unlock(drm_gpuvm_resv(gpuvm)); + } + + while (bo_defer) { + vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer); + bo_defer = bo_defer->next; + obj = vm_bo->obj; + if (ops && ops->vm_bo_free) + ops->vm_bo_free(vm_bo); + else + kfree(vm_bo); + + drm_gpuvm_put(gpuvm); + drm_gem_object_put(obj); + } +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup); + static struct drm_gpuvm_bo * __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj) @@ -1949,6 +2106,40 @@ drm_gpuva_unlink(struct drm_gpuva *va) EXPORT_SYMBOL_GPL(drm_gpuva_unlink); /** + * drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup + * @va: the &drm_gpuva to unlink + * + * Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and takes + * the lock for the caller. + */ +void +drm_gpuva_unlink_defer(struct drm_gpuva *va) +{ + struct drm_gem_object *obj = va->gem.obj; + struct drm_gpuvm_bo *vm_bo = va->vm_bo; + bool should_defer_bo; + + if (unlikely(!obj)) + return; + + drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm)); + + mutex_lock(&obj->gpuva.lock); + list_del_init(&va->gem.entry); + + /* + * This is drm_gpuvm_bo_put_deferred() except we already hold the mutex. 
+ */ + should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_into_zombie); + mutex_unlock(&obj->gpuva.lock); + if (should_defer_bo) + drm_gpuvm_bo_defer_zombie_cleanup(vm_bo); + + va->vm_bo = NULL; +} +EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer); + +/** * drm_gpuva_find_first() - find the first &drm_gpuva in the given range * @gpuvm: the &drm_gpuvm to search in * @addr: the &drm_gpuvas address diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 5a3bed48ab1f..f893b1e3a596 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -56,6 +56,17 @@ static inline void drm_client_debugfs_init(struct drm_device *dev) { } #endif +/* drm_client_sysrq.c */ +#if defined(CONFIG_DRM_CLIENT) && defined(CONFIG_MAGIC_SYSRQ) +void drm_client_sysrq_register(struct drm_device *dev); +void drm_client_sysrq_unregister(struct drm_device *dev); +#else +static inline void drm_client_sysrq_register(struct drm_device *dev) +{ } +static inline void drm_client_sysrq_unregister(struct drm_device *dev) +{ } +#endif + /* drm_file.c */ extern struct mutex drm_global_mutex; bool drm_dev_needs_global_mutex(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index d8a24875a7ba..ff193155129e 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -373,6 +373,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) return -EINVAL; file_priv->supports_virtualized_cursor_plane = req->value; break; + case DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE: + if (!file_priv->atomic) + return -EINVAL; + if (req->value > 1) + return -EINVAL; + file_priv->plane_color_pipeline = req->value; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index e33c78fc8fbd..00482227a9cd 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -26,6 +26,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modes.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> #include <video/mipi_display.h> @@ -691,7 +692,7 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev, const struct drm_simple_display_pipe_funcs *funcs, const struct drm_display_mode *mode, unsigned int rotation) { - size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16); + size_t bufsize = (u32)mode->vdisplay * mode->hdisplay * sizeof(u16); dbidev->drm.mode_config.preferred_depth = 16; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index ca254611b382..6692abe564d3 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -49,6 +49,7 @@ #include <linux/stacktrace.h> #include <drm/drm_mm.h> +#include <drm/drm_print.h> /** * DOC: Overview diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 25f376869b3a..d12db9b0bab8 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -30,6 +30,7 @@ #include <drm/drm_managed.h> #include <drm/drm_mode_config.h> #include <drm/drm_print.h> +#include <drm/drm_colorop.h> #include <linux/dma-resv.h> #include "drm_crtc_internal.h" @@ -192,11 +193,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data, void drm_mode_config_reset(struct drm_device *dev) { struct drm_crtc *crtc; + struct drm_colorop *colorop; struct drm_plane *plane; struct drm_encoder *encoder; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + 
drm_for_each_colorop(colorop, dev) + drm_colorop_reset(colorop); + drm_for_each_plane(plane, dev) if (plane->funcs->reset) plane->funcs->reset(plane); @@ -437,6 +442,7 @@ int drmm_mode_config_init(struct drm_device *dev) INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); INIT_LIST_HEAD(&dev->mode_config.plane_list); + INIT_LIST_HEAD(&dev->mode_config.colorop_list); INIT_LIST_HEAD(&dev->mode_config.privobj_list); idr_init_base(&dev->mode_config.object_idr, 1); idr_init_base(&dev->mode_config.tile_idr, 1); @@ -458,6 +464,7 @@ int drmm_mode_config_init(struct drm_device *dev) dev->mode_config.num_crtc = 0; dev->mode_config.num_encoder = 0; dev->mode_config.num_total_plane = 0; + dev->mode_config.num_colorop = 0; if (IS_ENABLED(CONFIG_LOCKDEP)) { struct drm_modeset_acquire_ctx modeset_ctx; diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index e943205a2394..b45d501b10c8 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -28,6 +28,7 @@ #include <drm/drm_device.h> #include <drm/drm_file.h> #include <drm/drm_mode_object.h> +#include <drm/drm_plane.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" @@ -386,6 +387,7 @@ EXPORT_SYMBOL(drm_object_property_get_default_value); /* helper for getconnector and getproperties ioctls */ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, + bool plane_color_pipeline, uint32_t __user *prop_ptr, uint64_t __user *prop_values, uint32_t *arg_count_props) @@ -399,6 +401,21 @@ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic) continue; + if (plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) { + struct drm_plane *plane = obj_to_plane(obj); + + if (prop == plane->color_encoding_property || + prop == plane->color_range_property) + continue; + } + + if (!plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) { + struct drm_plane *plane = obj_to_plane(obj); + + if (prop == plane->color_pipeline_property) + continue; + } + if (*arg_count_props > count) { ret = __drm_object_property_get_value(obj, prop, &val); if (ret) @@ -457,6 +474,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, } ret = drm_mode_object_get_properties(obj, file_priv->atomic, + file_priv->plane_color_pipeline, (uint32_t __user *)(unsigned long)(arg->props_ptr), (uint64_t __user *)(unsigned long)(arg->prop_values_ptr), &arg->count_props); diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index 988735560570..a57f6a10ada4 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -203,10 +203,10 @@ int drm_mode_config_helper_suspend(struct drm_device *dev) if (dev->mode_config.poll_enabled) drm_kms_helper_poll_disable(dev); - drm_client_dev_suspend(dev, false); + drm_client_dev_suspend(dev); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); /* * Don't enable polling if it was never initialized @@ -252,7 +252,7 @@ int drm_mode_config_helper_resume(struct drm_device *dev) DRM_ERROR("Failed to resume (%d)\n", ret); dev->mode_config.suspend_state = NULL; - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); /* * Don't enable polling if it is not initialized diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 38f82391bfda..d2c211f66c9e 100644 --- 
a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -210,7 +210,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev, formats_size = sizeof(__u32) * plane->format_count; if (WARN_ON(!formats_size)) { /* 0 formats are never expected */ - return 0; + return ERR_PTR(-EINVAL); } modifiers_size = @@ -226,7 +226,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev, blob = drm_property_create_blob(dev, blob_size, NULL); if (IS_ERR(blob)) - return NULL; + return blob; blob_data = blob->data; blob_data->version = FORMAT_BLOB_CURRENT; @@ -1820,3 +1820,62 @@ int drm_plane_add_size_hints_property(struct drm_plane *plane, return 0; } EXPORT_SYMBOL(drm_plane_add_size_hints_property); + +/** + * drm_plane_create_color_pipeline_property - create a new color pipeline + * property + * + * @plane: drm plane + * @pipelines: list of pipelines + * @num_pipelines: number of pipelines + * + * Create the COLOR_PIPELINE plane property to specify color pipelines on + * the plane. + * + * RETURNS: + * Zero for success or -errno + */ +int drm_plane_create_color_pipeline_property(struct drm_plane *plane, + const struct drm_prop_enum_list *pipelines, + int num_pipelines) +{ + struct drm_prop_enum_list *all_pipelines; + struct drm_property *prop; + int len = 0; + int i; + + all_pipelines = kcalloc(num_pipelines + 1, + sizeof(*all_pipelines), + GFP_KERNEL); + + if (!all_pipelines) { + drm_err(plane->dev, "failed to allocate color pipeline\n"); + return -ENOMEM; + } + + /* Create default Bypass color pipeline */ + all_pipelines[len].type = 0; + all_pipelines[len].name = "Bypass"; + len++; + + /* Add all other color pipelines */ + for (i = 0; i < num_pipelines; i++, len++) { + all_pipelines[len].type = pipelines[i].type; + all_pipelines[len].name = pipelines[i].name; + } + + prop = drm_property_create_enum(plane->dev, DRM_MODE_PROP_ATOMIC, + "COLOR_PIPELINE", + all_pipelines, len); + if (IS_ERR(prop)) { + kfree(all_pipelines); + return PTR_ERR(prop); + } + + drm_object_attach_property(&plane->base, prop, 0); + plane->color_pipeline_property = prop; + + kfree(all_pipelines); + return 0; +} +EXPORT_SYMBOL(drm_plane_create_color_pipeline_property); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 43a10b4af43a..21809a82187b 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -37,6 +37,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include "drm_internal.h" diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 46f59883183d..5c14140cd0c2 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -136,8 +136,17 @@ * vblanks after a timer has expired, which can be configured through the * ``vblankoffdelay`` module parameter. * - * Drivers for hardware without support for vertical-blanking interrupts - * must not call drm_vblank_init(). For such drivers, atomic helpers will + * Drivers for hardware without support for vertical-blanking interrupts can + * use DRM vblank timers to send vblank events at the rate of the current + * display mode's refresh. While not synchronized to the hardware's + * vertical-blanking regions, the timer helps DRM clients and compositors to + * adapt their update cycle to the display output. Drivers should set up + * vblank handling as usual, but call drm_crtc_vblank_start_timer() and + * drm_crtc_vblank_cancel_timer() as part of their atomic mode setting.
+ * See also DRM vblank helpers for more information. + * + * Drivers without support for vertical-blanking interrupts nor timers must + * not call drm_vblank_init(). For these drivers, atomic helpers will * automatically generate fake vblank events as part of the display update. * This functionality also can be controlled by the driver by enabling and * disabling struct drm_crtc_state.no_vblank. @@ -508,6 +517,9 @@ static void drm_vblank_init_release(struct drm_device *dev, void *ptr) drm_WARN_ON(dev, READ_ONCE(vblank->enabled) && drm_core_check_feature(dev, DRIVER_MODESET)); + if (vblank->vblank_timer.crtc) + hrtimer_cancel(&vblank->vblank_timer.timer); + drm_vblank_destroy_worker(vblank); timer_delete_sync(&vblank->disable_timer); } @@ -794,10 +806,8 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal( ts_vblank_time = ktime_to_timespec64(*vblank_time); drm_dbg_vbl(dev, - "crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n", - pipe, hpos, vpos, - (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000, - (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000, + "crtc %u : v p(%d,%d)@ %ptSp -> %ptSp [e %d us, %d rep]\n", + pipe, hpos, vpos, &ts_etime, &ts_vblank_time, duration_ns / 1000, i); return true; @@ -1303,7 +1313,7 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) ret = wait_event_timeout(vblank->queue, last != drm_vblank_count(dev, pipe), - msecs_to_jiffies(100)); + msecs_to_jiffies(1000)); drm_WARN(dev, ret == 0, "vblank wait timed out on crtc %i\n", pipe); @@ -2162,3 +2172,159 @@ err_free: return ret; } +/* + * VBLANK timer + */ + +static enum hrtimer_restart drm_vblank_timer_function(struct hrtimer *timer) +{ + struct drm_vblank_crtc_timer *vtimer = + container_of(timer, struct drm_vblank_crtc_timer, timer); + struct drm_crtc *crtc = vtimer->crtc; + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + struct drm_device *dev = crtc->dev; + unsigned long flags; + ktime_t interval; + u64 ret_overrun; + bool succ; + + spin_lock_irqsave(&vtimer->interval_lock, flags); + interval = vtimer->interval; + spin_unlock_irqrestore(&vtimer->interval_lock, flags); + + if (!interval) + return HRTIMER_NORESTART; + + ret_overrun = hrtimer_forward_now(&vtimer->timer, interval); + if (ret_overrun != 1) + drm_dbg_vbl(dev, "vblank timer overrun\n"); + + if (crtc_funcs->handle_vblank_timeout) + succ = crtc_funcs->handle_vblank_timeout(crtc); + else + succ = drm_crtc_handle_vblank(crtc); + if (!succ) + return HRTIMER_NORESTART; + + return HRTIMER_RESTART; +} + +/** + * drm_crtc_vblank_start_timer - Starts the vblank timer on the given CRTC + * @crtc: the CRTC + * + * Drivers should call this function from their CRTC's enable_vblank + * function to start a vblank timer. The timer will fire after the duration + * of a full frame. drm_crtc_vblank_cancel_timer() disables a running timer. + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_crtc_vblank_start_timer(struct drm_crtc *crtc) +{ + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer; + unsigned long flags; + + if (!vtimer->crtc) { + /* + * Set up the data structures on the first invocation. + */ + vtimer->crtc = crtc; + spin_lock_init(&vtimer->interval_lock); + hrtimer_setup(&vtimer->timer, drm_vblank_timer_function, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + } else { + /* + * Timer should not be active. If it is, wait for the + * previous cancel operations to finish. 
+ */ + while (hrtimer_active(&vtimer->timer)) + hrtimer_try_to_cancel(&vtimer->timer); + } + + drm_calc_timestamping_constants(crtc, &crtc->mode); + + spin_lock_irqsave(&vtimer->interval_lock, flags); + vtimer->interval = ns_to_ktime(vblank->framedur_ns); + spin_unlock_irqrestore(&vtimer->interval_lock, flags); + + hrtimer_start(&vtimer->timer, vtimer->interval, HRTIMER_MODE_REL); + + return 0; +} +EXPORT_SYMBOL(drm_crtc_vblank_start_timer); + +/** + * drm_crtc_vblank_cancel_timer - Cancels the given CRTC's vblank timer + * @crtc: the CRTC + * + * Drivers should call this function from their CRTC's disable_vblank + * function to stop a vblank timer. + */ +void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc) +{ + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer; + unsigned long flags; + + /* + * Calling hrtimer_cancel() can result in a deadlock with DRM's + * vblank_time_lock and hrtimers' softirq_expiry_lock. So + * clear interval and indicate cancellation. The timer function + * will cancel itself on the next invocation. + */ + + spin_lock_irqsave(&vtimer->interval_lock, flags); + vtimer->interval = 0; + spin_unlock_irqrestore(&vtimer->interval_lock, flags); + + hrtimer_try_to_cancel(&vtimer->timer); +} +EXPORT_SYMBOL(drm_crtc_vblank_cancel_timer); + +/** + * drm_crtc_vblank_get_vblank_timeout - Returns the vblank timeout + * @crtc: The CRTC + * @vblank_time: Returns the next vblank timestamp + * + * The helper drm_crtc_vblank_get_vblank_timeout() returns the next vblank + * timestamp of the CRTC's vblank timer according to the timer's expiry + * time. + */ +void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time) +{ + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer; + u64 cur_count; + ktime_t cur_time; + + if (!READ_ONCE(vblank->enabled)) { + *vblank_time = ktime_get(); + return; + } + + /* + * A concurrent vblank timeout could update the expires field before + * we compare it with the vblank time. Hence we'd compare the old + * expiry time to the new vblank time and deduce that the timer had + * already expired. Reread until we get consistent values from both fields. + */ + do { + cur_count = drm_crtc_vblank_count_and_time(crtc, &cur_time); + *vblank_time = READ_ONCE(vtimer->timer.node.expires); + } while (cur_count != drm_crtc_vblank_count_and_time(crtc, &cur_time)); + + if (drm_WARN_ON(crtc->dev, !ktime_compare(*vblank_time, cur_time))) + return; /* Already expired */ + + /* + * To prevent races we roll the hrtimer forward before we do any + * interrupt processing - this is how real hw works (the interrupt + * is only generated after all the vblank registers are updated) + * and what the vblank core expects. Therefore we need to always + * correct the timestamp by one frame.
+ */ + *vblank_time = ktime_sub(*vblank_time, vtimer->interval); +} +EXPORT_SYMBOL(drm_crtc_vblank_get_vblank_timeout); diff --git a/drivers/gpu/drm/drm_vblank_helper.c b/drivers/gpu/drm/drm_vblank_helper.c new file mode 100644 index 000000000000..a04a6ba1b0ca --- /dev/null +++ b/drivers/gpu/drm/drm_vblank_helper.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: MIT + +#include <drm/drm_atomic.h> +#include <drm/drm_crtc.h> +#include <drm/drm_managed.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> + +/** + * DOC: overview + * + * The vblank helper library provides functions for supporting vertical + * blanking in DRM drivers. + * + * For vblank timers, several callback implementations are available. + * Drivers enable support for vblank timers by setting the vblank callbacks + * in struct &drm_crtc_funcs to the helpers provided by this library. The + * initializer macro DRM_CRTC_VBLANK_TIMER_FUNCS does this conveniently. + * The driver further has to send the VBLANK event from its atomic_flush + * callback and control vblank from the CRTC's atomic_enable and atomic_disable + * callbacks. The callbacks are located in struct &drm_crtc_helper_funcs. + * The vblank helper library provides implementations of these callbacks + * for drivers without further requirements. The initializer macro + * DRM_CRTC_HELPER_VBLANK_FUNCS sets them conveniently. + * + * Once the driver enables vblank support with drm_vblank_init(), each + * CRTC's vblank timer fires according to the programmed display mode. By + * default, the vblank timer invokes drm_crtc_handle_vblank(). Drivers with + * more specific requirements can set their own handler function in + * struct &drm_crtc_helper_funcs.handle_vblank_timeout. + */ + +/* + * VBLANK helpers + */ + +/** + * drm_crtc_vblank_atomic_flush - + * Implements struct &drm_crtc_helper_funcs.atomic_flush + * @crtc: The CRTC + * @state: The atomic state to apply + * + * The helper drm_crtc_vblank_atomic_flush() implements atomic_flush of + * struct drm_crtc_helper_funcs for CRTCs that only need to send out a + * VBLANK event. + * + * See also struct &drm_crtc_helper_funcs.atomic_flush. + */ +void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct drm_pending_vblank_event *event; + + spin_lock_irq(&dev->event_lock); + + event = crtc_state->event; + crtc_state->event = NULL; + + if (event) { + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + } + + spin_unlock_irq(&dev->event_lock); +} +EXPORT_SYMBOL(drm_crtc_vblank_atomic_flush); + +/** + * drm_crtc_vblank_atomic_enable - Implements struct &drm_crtc_helper_funcs.atomic_enable + * @crtc: The CRTC + * @state: The atomic state + * + * The helper drm_crtc_vblank_atomic_enable() implements atomic_enable + * of struct drm_crtc_helper_funcs for CRTCs that only need to enable VBLANKs. + * + * See also struct &drm_crtc_helper_funcs.atomic_enable.
+ */ +void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + drm_crtc_vblank_on(crtc); +} +EXPORT_SYMBOL(drm_crtc_vblank_atomic_enable); + +/** + * drm_crtc_vblank_atomic_disable - Implements struct &drm_crtc_helper_funcs.atomic_disable + * @crtc: The CRTC + * @state: The atomic state + * + * The helper drm_crtc_vblank_atomic_disable() implements atomic_disable + * of struct drm_crtc_helper_funcs for CRTCs that only need to disable VBLANKs. + * + * See also struct &drm_crtc_helper_funcs.atomic_disable. + */ +void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + drm_crtc_vblank_off(crtc); +} +EXPORT_SYMBOL(drm_crtc_vblank_atomic_disable); + +/* + * VBLANK timer + */ + +/** + * drm_crtc_vblank_helper_enable_vblank_timer - Implements struct &drm_crtc_funcs.enable_vblank + * @crtc: The CRTC + * + * The helper drm_crtc_vblank_helper_enable_vblank_timer() implements + * enable_vblank of struct drm_crtc_funcs for CRTCs that require + * a VBLANK timer. It sets up the timer on the first invocation. The + * started timer expires after the current frame duration. See struct + * &drm_vblank_crtc.framedur_ns. + * + * See also struct &drm_crtc_funcs.enable_vblank. + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc) +{ + return drm_crtc_vblank_start_timer(crtc); +} +EXPORT_SYMBOL(drm_crtc_vblank_helper_enable_vblank_timer); + +/** + * drm_crtc_vblank_helper_disable_vblank_timer - Implements struct &drm_crtc_funcs.disable_vblank + * @crtc: The CRTC + * + * The helper drm_crtc_vblank_helper_disable_vblank_timer() implements + * disable_vblank of struct drm_crtc_funcs for CRTCs that require a + * VBLANK timer. + * + * See also struct &drm_crtc_funcs.disable_vblank. + */ +void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc) +{ + drm_crtc_vblank_cancel_timer(crtc); +} +EXPORT_SYMBOL(drm_crtc_vblank_helper_disable_vblank_timer); + +/** + * drm_crtc_vblank_helper_get_vblank_timestamp_from_timer - + * Implements struct &drm_crtc_funcs.get_vblank_timestamp + * @crtc: The CRTC + * @max_error: Maximum acceptable error + * @vblank_time: Returns the next vblank timestamp + * @in_vblank_irq: True if called from drm_crtc_handle_vblank() + * + * The helper drm_crtc_vblank_helper_get_vblank_timestamp_from_timer() implements + * get_vblank_timestamp of struct drm_crtc_funcs for CRTCs that require a + * VBLANK timer. It returns the timestamp according to the timer's expiry + * time. + * + * See also struct &drm_crtc_funcs.get_vblank_timestamp. + * + * Returns: + * True on success, or false otherwise.
+ */ +bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq) +{ + drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time); + + return true; +} +EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_from_timer); diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c index e4e1873f0e1e..70f0199251ea 100644 --- a/drivers/gpu/drm/drm_vblank_work.c +++ b/drivers/gpu/drm/drm_vblank_work.c @@ -244,7 +244,7 @@ EXPORT_SYMBOL(drm_vblank_work_flush); void drm_vblank_work_flush_all(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(crtc)]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); spin_lock_irq(&dev->event_lock); wait_event_lock_irq(vblank->work_wait_queue, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 88385dc3b30d..ad5e6f7b23f9 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -4,6 +4,7 @@ */ #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include "etnaviv_cmdbuf.h" #include "etnaviv_gpu.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 3e91747ed339..54ceae87b401 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_ioctl.h> #include <drm/drm_of.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include "etnaviv_cmdbuf.h" #include "etnaviv_drv.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 2f844e82bc46..5d8f3b03d4ae 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -4,6 +4,7 @@ */ #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <linux/dma-mapping.h> #include <linux/shmem_fs.h> #include <linux/spinlock.h> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 76c742328edb..a9611c1a773f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -4,6 +4,7 @@ */ #include <drm/drm_file.h> +#include <drm/drm_print.h> #include <linux/dma-fence-array.h> #include <linux/file.h> #include <linux/dma-resv.h> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index cf0d9049bcf1..ca0be293f5fe 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -16,6 +16,8 @@ #include <linux/reset.h> #include <linux/thermal.h> +#include <drm/drm_print.h> + #include "etnaviv_cmdbuf.h" #include "etnaviv_dump.h" #include "etnaviv_gpu.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c index 8665f2658d51..32d710baf17f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c @@ -198,6 +198,38 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { }, { .model = 0x8000, + .revision = 0x6205, + .product_id = 0x80003, + .customer_id = 0x15, + .eco_id = 0, + .stream_count = 16, + .register_max = 64, + .thread_count = 512, + .shader_core_count = 2, + .nn_core_count = 2, + .vertex_cache_size = 16, + .vertex_output_buffer_size = 1024, + .pixel_pipes = 1, + .instruction_count = 512, + .num_constants = 320, + .buffer_size = 0, + .varyings_count = 16, + .features = 0xe0287c8d, + 
.minor_features0 = 0xc1799eff, + .minor_features1 = 0xfefbfad9, + .minor_features2 = 0xeb9d4fbf, + .minor_features3 = 0xedfffced, + .minor_features4 = 0xdb0dafc7, + .minor_features5 = 0x7b5ac333, + .minor_features6 = 0xfcce6000, + .minor_features7 = 0x03fbfa6f, + .minor_features8 = 0x00ef0ef0, + .minor_features9 = 0x0eca703c, + .minor_features10 = 0x898048f0, + .minor_features11 = 0x00000034, + }, + { + .model = 0x8000, .revision = 0x7120, .product_id = 0x45080009, .customer_id = 0x88, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index df5192083b20..a992be2ede88 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -6,6 +6,8 @@ #include <linux/dma-mapping.h> #include <linux/scatterlist.h> +#include <drm/drm_print.h> + #include "common.xml.h" #include "etnaviv_cmdbuf.h" #include "etnaviv_drv.h" diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index b9e206303b48..9ae0fa4667a9 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -20,6 +20,7 @@ #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "exynos_drm_crtc.h" diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index b8d9b7251319..bb74b17f9753 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -20,6 +20,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <drm/exynos_drm.h> diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index ddd73e7f26a3..6ecd95bcb0c4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -14,6 +14,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/exynos_drm.h> diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 93de25b77e68..637927818dfe 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -16,6 +16,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" @@ -42,8 +43,6 @@ static void exynos_drm_fb_destroy(struct fb_info *info) drm_framebuffer_remove(fb); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops exynos_drm_fb_ops = { @@ -59,18 +58,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes, struct exynos_drm_gem *exynos_gem) { - struct fb_info *fbi; + struct fb_info *fbi = helper->info; struct drm_framebuffer *fb = helper->fb; unsigned int size = fb->width * fb->height * fb->format->cpp[0]; unsigned long offset; - fbi = drm_fb_helper_alloc_info(helper); - if (IS_ERR(fbi)) { - DRM_DEV_ERROR(to_dma_dev(helper->dev), - "failed to allocate fb info.\n"); - return PTR_ERR(fbi); - } - fbi->fbops = &exynos_drm_fb_ops; drm_fb_helper_fill_info(fbi, helper, sizes); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c 
b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 205c238cc73a..b6abdc4f2b0a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -23,6 +23,7 @@ #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <drm/exynos_drm.h> diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index d32f2474cbaa..2bea107dd960 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -21,6 +21,7 @@ #include <linux/workqueue.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index e3fbb45f37a2..b9b2f000072d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -10,7 +10,9 @@ #include <linux/shmem_fs.h> #include <linux/module.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_vma_manager.h> #include <drm/exynos_drm.h> @@ -329,15 +331,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, unsigned int flags; int ret; + ret = drm_mode_size_dumb(dev, args, 0, 0); + if (ret) + return ret; + /* * allocate memory to be used for framebuffer. * - this callback would be called by user application * with DRM_IOCTL_MODE_CREATE_DUMB command. */ - args->pitch = args->width * ((args->bpp + 7) / 8); - args->size = args->pitch * args->height; - if (is_drm_iommu_supported(dev)) flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; else diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 03c8490af4f4..008def51225a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -22,6 +22,7 @@ #include <drm/drm_file.h> #include <drm/drm_fourcc.h> #include <drm/drm_mode.h> +#include <drm/drm_print.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 7c3aa77186d3..67afddd566e2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include <drm/exynos_drm.h> #include "exynos_drm_crtc.h" @@ -58,7 +59,7 @@ static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state) struct drm_plane_state *state = &exynos_state->base; struct drm_crtc *crtc = state->crtc; struct drm_crtc_state *crtc_state = - drm_atomic_get_existing_crtc_state(state->state, crtc); + drm_atomic_get_new_crtc_state(state->state, crtc); struct drm_display_mode *mode = &crtc_state->adjusted_mode; int crtc_x, crtc_y; unsigned int crtc_w, crtc_h; diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index e094b8bbc0f1..64c69dd2966e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -14,6 +14,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c 
b/drivers/gpu/drm/exynos/exynos_mixer.c index a3670d2eaab2..69dea5049309 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -28,6 +28,7 @@ #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <drm/exynos_drm.h> diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 794a87d16f88..a9a341ea6507 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -15,6 +15,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "fsl_dcu_drm_drv.h" diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c index 8711a7a5b8da..c8f1716a12d5 100644 --- a/drivers/gpu/drm/gma500/backlight.c +++ b/drivers/gpu/drm/gma500/backlight.c @@ -11,6 +11,8 @@ #include <acpi/video.h> +#include <drm/drm_print.h> + #include "psb_drv.h" #include "psb_intel_reg.h" #include "psb_intel_drv.h" diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 718d45891fc7..fd6ea8998dbe 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -9,6 +9,7 @@ #include <drm/drm.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_print.h> #include "cdv_device.h" #include "gma_device.h" diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index bbd0abdd8382..5942a9d46b02 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -11,6 +11,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include "cdv_device.h" #include "framebuffer.h" diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index c85143792019..54bf626f0524 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -34,6 +34,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "gma_display.h" diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index f2a3e37ef632..8e93ee0d0ccd 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -31,6 +31,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "cdv_device.h" diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 9276e3676ba0..fbe7fe317393 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -14,6 +14,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "cdv_device.h" diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c index 32d31e5f5f1a..c26926babc2a 100644 --- a/drivers/gpu/drm/gma500/fbdev.c +++ b/drivers/gpu/drm/gma500/fbdev.c @@ -50,48 +50,6 @@ static const struct vm_operations_struct psb_fbdev_vm_ops = { * struct fb_ops */ -#define CMAP_TOHW(_val, _width) ((((_val) << 
(_width)) + 0x7FFF - (_val)) >> 16) - -static int psb_fbdev_fb_setcolreg(unsigned int regno, - unsigned int red, unsigned int green, - unsigned int blue, unsigned int transp, - struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - uint32_t v; - - if (!fb) - return -ENOMEM; - - if (regno > 255) - return 1; - - red = CMAP_TOHW(red, info->var.red.length); - blue = CMAP_TOHW(blue, info->var.blue.length); - green = CMAP_TOHW(green, info->var.green.length); - transp = CMAP_TOHW(transp, info->var.transp.length); - - v = (red << info->var.red.offset) | - (green << info->var.green.offset) | - (blue << info->var.blue.offset) | - (transp << info->var.transp.offset); - - if (regno < 16) { - switch (fb->format->cpp[0] * 8) { - case 16: - ((uint32_t *) info->pseudo_palette)[regno] = v; - break; - case 24: - case 32: - ((uint32_t *) info->pseudo_palette)[regno] = v; - break; - } - } - - return 0; -} - static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { if (vma->vm_pgoff != 0) @@ -126,16 +84,12 @@ static void psb_fbdev_fb_destroy(struct fb_info *info) drm_gem_object_put(obj); drm_client_release(&fb_helper->client); - - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops psb_fbdev_fb_ops = { .owner = THIS_MODULE, __FB_DEFAULT_IOMEM_OPS_RDWR, DRM_FB_HELPER_DEFAULT_OPS, - .fb_setcolreg = psb_fbdev_fb_setcolreg, __FB_DEFAULT_IOMEM_OPS_DRAW, .fb_mmap = psb_fbdev_fb_mmap, .fb_destroy = psb_fbdev_fb_destroy, @@ -154,7 +108,7 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct drm_device *dev = fb_helper->dev; struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); - struct fb_info *info; + struct fb_info *info = fb_helper->info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd = { }; int size; @@ -213,12 +167,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->funcs = &psb_fbdev_fb_helper_funcs; fb_helper->fb = fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_framebuffer_unregister_private; - } - info->fbops = &psb_fbdev_fb_ops; /* Accessed stolen memory directly */ @@ -242,10 +190,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, return 0; -err_drm_framebuffer_unregister_private: - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); - kfree(fb); err_drm_gem_object_put: drm_gem_object_put(obj); return ret; diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 4b7627a72637..2e44a2ac2742 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -16,6 +16,7 @@ #include <asm/set_memory.h> #include <drm/drm.h> +#include <drm/drm_print.h> #include <drm/drm_vma_manager.h> #include "gem.h" diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c index d5924ca3ed05..b60720560830 100644 --- a/drivers/gpu/drm/gma500/intel_bios.c +++ b/drivers/gpu/drm/gma500/intel_bios.c @@ -8,6 +8,7 @@ #include <drm/display/drm_dp_helper.h> #include <drm/drm.h> +#include <drm/drm_print.h> #include "intel_bios.h" #include "psb_drv.h" diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c index ee8b047587f2..2b06ba22f9c6 100644 --- a/drivers/gpu/drm/gma500/intel_gmbus.c +++ b/drivers/gpu/drm/gma500/intel_gmbus.c @@ -32,6 +32,8 @@ #include <linux/i2c.h> #include <linux/module.h> +#include 
<drm/drm_print.h> + #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c index cba97d7db131..0326f3ddc621 100644 --- a/drivers/gpu/drm/gma500/mid_bios.c +++ b/drivers/gpu/drm/gma500/mid_bios.c @@ -12,6 +12,7 @@ */ #include <drm/drm.h> +#include <drm/drm_print.h> #include "mid_bios.h" #include "psb_drv.h" diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index ea9b41af0867..086d14678a8e 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -10,6 +10,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include "framebuffer.h" #include "gem.h" diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index c0feca58511d..20d027d552c7 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -30,6 +30,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "psb_drv.h" diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c index 6daa6669ed23..48e8ac560a2a 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c @@ -30,6 +30,9 @@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> + +#include <drm/drm_print.h> + #include "psb_drv.h" #define HDMI_READ(reg) readl(hdmi_dev->regs + (reg)) diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 72191d6f0d06..0705ba3813e6 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -13,6 +13,7 @@ #include <drm/drm_edid.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "intel_bios.h" diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c index 0c271072af63..5f0daa25b86d 100644 --- a/drivers/gpu/drm/gma500/opregion.c +++ b/drivers/gpu/drm/gma500/opregion.c @@ -22,6 +22,9 @@ * */ #include <linux/acpi.h> + +#include <drm/drm_print.h> + #include "psb_drv.h" #include "psb_irq.h" #include "psb_intel_reg.h" diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 85d3557c2eb9..005ab7f5355f 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -25,6 +25,7 @@ #include <drm/drm_file.h> #include <drm/drm_ioctl.h> #include <drm/drm_pciids.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "framebuffer.h" diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index ff46e88c4768..1ff2bd23db74 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -11,6 +11,7 @@ #include <drm/drm_modeset_helper.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include "framebuffer.h" #include "gem.h" diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 9ad611b5956e..f8f3c42e67a7 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -13,6 +13,7 @@ #include <drm/drm_crtc_helper.h> #include 
<drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "intel_bios.h" diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index afda40fc4494..553e7c7d9bb8 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -36,6 +36,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include "psb_drv.h" #include "psb_intel_drv.h" diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 7bbb79b0497d..3a946b472064 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -9,6 +9,7 @@ **************************************************************************/ #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "power.h" @@ -249,6 +250,7 @@ static irqreturn_t gma_irq_handler(int irq, void *arg) void gma_irq_preinstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + struct drm_crtc *crtc; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -259,10 +261,15 @@ void gma_irq_preinstall(struct drm_device *dev) PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE); PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); - if (dev->vblank[0].enabled) - dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; - if (dev->vblank[1].enabled) - dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; + drm_for_each_crtc(crtc, dev) { + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + + if (vblank->enabled) { + u32 mask = drm_crtc_index(crtc) ? _PSB_VSYNC_PIPEB_FLAG : + _PSB_VSYNC_PIPEA_FLAG; + dev_priv->vdc_irq_mask |= mask; + } + } /* Revisit this area - want per device masks ? 
*/ if (dev_priv->ops->hotplug) @@ -277,8 +284,8 @@ void gma_irq_preinstall(struct drm_device *dev) void gma_irq_postinstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + struct drm_crtc *crtc; unsigned long irqflags; - unsigned int i; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -291,11 +298,13 @@ void gma_irq_postinstall(struct drm_device *dev) PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - for (i = 0; i < dev->num_crtcs; ++i) { - if (dev->vblank[i].enabled) - gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + drm_for_each_crtc(crtc, dev) { + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + + if (vblank->enabled) + gma_enable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); else - gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); } if (dev_priv->ops->hotplug_enable) @@ -336,8 +345,8 @@ void gma_irq_uninstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); + struct drm_crtc *crtc; unsigned long irqflags; - unsigned int i; if (!dev_priv->irq_enabled) return; @@ -349,9 +358,11 @@ void gma_irq_uninstall(struct drm_device *dev) PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - for (i = 0; i < dev->num_crtcs; ++i) { - if (dev->vblank[i].enabled) - gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + drm_for_each_crtc(crtc, dev) { + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + + if (vblank->enabled) + gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); } dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c index 4a15695fa933..1726a3fadff8 100644 --- a/drivers/gpu/drm/gud/gud_connector.c +++ b/drivers/gpu/drm/gud/gud_connector.c @@ -561,11 +561,11 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn continue; /* not a DRM property */ property = gud_connector_property_lookup(connector, prop); - if (WARN_ON(IS_ERR(property))) + if (drm_WARN_ON(drm, IS_ERR(property))) continue; state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state); - if (WARN_ON(IS_ERR(state_val))) + if (drm_WARN_ON(drm, IS_ERR(state_val))) continue; *state_val = val; @@ -593,7 +593,7 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state, unsigned int *state_val; state_val = gud_connector_tv_state_val(prop, &connector_state->tv); - if (WARN_ON_ONCE(IS_ERR(state_val))) + if (drm_WARN_ON_ONCE(connector_state->connector->dev, IS_ERR(state_val))) return PTR_ERR(state_val); val = *state_val; @@ -667,7 +667,7 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index, return ret; } - if (WARN_ON(connector->index != index)) + if (drm_WARN_ON(drm, connector->index != index)) return -EINVAL; if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS) diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index b7345c8d823d..42135a48d92e 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -249,7 +249,7 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val) return gud_usb_set(gdrm, request, 0, &val, sizeof(val)); } -static int gud_get_properties(struct gud_device *gdrm) +static int gud_plane_add_properties(struct gud_device *gdrm) { struct 
gud_property_req *properties; unsigned int i, num_properties; @@ -463,10 +463,6 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) return PTR_ERR(gdrm); drm = &gdrm->drm; - drm->mode_config.funcs = &gud_mode_config_funcs; - ret = drmm_mode_config_init(drm); - if (ret) - return ret; gdrm->flags = le32_to_cpu(desc.flags); gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4; @@ -483,11 +479,28 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) if (ret) return ret; + usb_set_intfdata(intf, gdrm); + + dma_dev = usb_intf_get_dma_device(intf); + if (dma_dev) { + drm_dev_set_dma_dev(drm, dma_dev); + put_device(dma_dev); + } else { + dev_warn(dev, "buffer sharing not supported"); /* not an error */ + } + + /* Mode config init */ + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + drm->mode_config.min_width = le32_to_cpu(desc.min_width); drm->mode_config.max_width = le32_to_cpu(desc.max_width); drm->mode_config.min_height = le32_to_cpu(desc.min_height); drm->mode_config.max_height = le32_to_cpu(desc.max_height); + drm->mode_config.funcs = &gud_mode_config_funcs; + /* Format init */ formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL); /* Add room for emulated XRGB8888 */ formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL); @@ -587,6 +600,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) return -ENOMEM; } + /* Pipeline init */ ret = drm_universal_plane_init(drm, &gdrm->plane, 0, &gud_plane_funcs, formats, num_formats, @@ -598,12 +612,9 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs); drm_plane_enable_fb_damage_clips(&gdrm->plane); - devm_kfree(dev, formats); - devm_kfree(dev, formats_dev); - - ret = gud_get_properties(gdrm); + ret = gud_plane_add_properties(gdrm); if (ret) { - dev_err(dev, "Failed to get properties (error=%d)\n", ret); + dev_err(dev, "Failed to add properties (error=%d)\n", ret); return ret; } @@ -621,16 +632,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) } drm_mode_config_reset(drm); - - usb_set_intfdata(intf, gdrm); - - dma_dev = usb_intf_get_dma_device(intf); - if (dma_dev) { - drm_dev_set_dma_dev(drm, dma_dev); - put_device(dma_dev); - } else { - dev_warn(dev, "buffer sharing not supported"); /* not an error */ - } + drm_kms_helper_poll_init(drm); drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL); @@ -638,7 +640,8 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) if (ret) return ret; - drm_kms_helper_poll_init(drm); + devm_kfree(dev, formats); + devm_kfree(dev, formats_dev); drm_client_setup(drm, NULL); diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index 54d9aa9998e5..76d77a736d84 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -61,7 +61,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format size_t len; void *buf; - WARN_ON_ONCE(format->char_per_block[0] != 1); + drm_WARN_ON_ONCE(fb->dev, format->char_per_block[0] != 1); /* Start on a byte boundary */ rect->x1 = ALIGN_DOWN(rect->x1, block_width); @@ -69,7 +69,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format height = drm_rect_height(rect); len = drm_format_info_min_pitch(format, 0, width) * height; - buf = kmalloc(width * height, GFP_KERNEL); + buf = 
kmalloc_array(height, width, GFP_KERNEL); if (!buf) return 0; @@ -138,7 +138,7 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7); break; default: - WARN_ON_ONCE(1); + drm_WARN_ON_ONCE(fb->dev, 1); return len; } @@ -527,7 +527,7 @@ int gud_plane_atomic_check(struct drm_plane *plane, drm_connector_list_iter_end(&conn_iter); } - if (WARN_ON_ONCE(!connector_state)) + if (drm_WARN_ON_ONCE(plane->dev, !connector_state)) return -ENOENT; len = struct_size(req, properties, @@ -539,7 +539,7 @@ int gud_plane_atomic_check(struct drm_plane *plane, gud_from_display_mode(&req->mode, mode); req->format = gud_from_fourcc(format->format); - if (WARN_ON_ONCE(!req->format)) { + if (drm_WARN_ON_ONCE(plane->dev, !req->format)) { ret = -EINVAL; goto out; } @@ -561,7 +561,7 @@ int gud_plane_atomic_check(struct drm_plane *plane, val = new_plane_state->rotation; break; default: - WARN_ON_ONCE(1); + drm_WARN_ON_ONCE(plane->dev, 1); ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 45c4eb008ad5..76384b4581bf 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -29,6 +29,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 1e1c87be1204..8a11c2df5b88 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -24,6 +24,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index 0d49f168a919..06b5d96e6eaf 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "hyperv_drm.h" diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c index 945b9482bcb3..7978f8c8108c 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c @@ -19,6 +19,9 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_panic.h> #include <drm/drm_plane.h> +#include <drm/drm_print.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #include "hyperv_drm.h" @@ -111,11 +114,15 @@ static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc, crtc_state->mode.hdisplay, crtc_state->mode.vdisplay, plane_state->fb->pitches[0]); + + drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = { .atomic_check = drm_crtc_helper_atomic_check, + .atomic_flush = drm_crtc_vblank_atomic_flush, .atomic_enable = hyperv_crtc_helper_atomic_enable, + .atomic_disable = drm_crtc_vblank_atomic_disable, }; static const struct drm_crtc_funcs hyperv_crtc_funcs = { @@ -125,6 +132,7 @@ static const struct drm_crtc_funcs hyperv_crtc_funcs = { .page_flip = drm_atomic_helper_page_flip, 
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static int hyperv_plane_atomic_check(struct drm_plane *plane, @@ -321,6 +329,10 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv) return ret; } + ret = drm_vblank_init(dev, 1); + if (ret) + return ret; + drm_mode_config_reset(dev); return 0; diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index e58c0c158b3a..7c89e5e0a277 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -13,6 +13,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror # drivers. Define I915 when building i915. subdir-ccflags-y += -DI915 +# FIXME: Disable tracepoints on i915 for PREEMPT_RT, unfortunately +# it's an all or nothing flag. You cannot selectively disable +# only some tracepoints. +subdir-ccflags-$(CONFIG_PREEMPT_RT) += -DNOTRACE + subdir-ccflags-y += -I$(src) # Please keep these build lists sorted! @@ -26,6 +31,7 @@ i915-y += \ i915_ioctl.o \ i915_irq.o \ i915_mitigations.o \ + i915_mmio_range.o \ i915_module.o \ i915_params.o \ i915_pci.o \ @@ -150,6 +156,7 @@ gem-y += \ gem/i915_gem_lmem.o \ gem/i915_gem_mman.o \ gem/i915_gem_object.o \ + gem/i915_gem_object_frontbuffer.o \ gem/i915_gem_pages.o \ gem/i915_gem_phys.o \ gem/i915_gem_pm.o \ @@ -228,6 +235,7 @@ i915-y += \ display/intel_bios.o \ display/intel_bo.o \ display/intel_bw.o \ + display/intel_casf.o \ display/intel_cdclk.o \ display/intel_cmtg.o \ display/intel_color.o \ @@ -236,6 +244,7 @@ i915-y += \ display/intel_crtc.o \ display/intel_crtc_state_dump.o \ display/intel_cursor.o \ + display/intel_dbuf_bw.o \ display/intel_display.o \ display/intel_display_conversion.o \ display/intel_display_driver.o \ @@ -248,6 +257,7 @@ i915-y += \ display/intel_display_rpm.o \ display/intel_display_rps.o \ display/intel_display_snapshot.o \ + display/intel_display_utils.o \ display/intel_display_wa.o \ display/intel_dmc.o \ display/intel_dmc_wl.o \ @@ -297,9 +307,11 @@ i915-y += \ display/intel_vblank.o \ display/intel_vga.o \ display/intel_wm.o \ + display/skl_prefill.o \ display/skl_scaler.o \ display/skl_universal_plane.o \ display/skl_watermark.o \ + display/vlv_clock.o \ display/vlv_sideband.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ @@ -346,6 +358,7 @@ i915-y += \ display/intel_gmbus.o \ display/intel_hdmi.o \ display/intel_lspcon.o \ + display/intel_lt_phy.o \ display/intel_lvds.o \ display/intel_panel.o \ display/intel_pfit.o \ @@ -413,7 +426,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o # # Enable locally for CONFIG_DRM_I915_WERROR=y. 
See also scripts/Makefile.build ifdef CONFIG_DRM_I915_WERROR - cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $< + cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none -Werror $< endif # header test diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index aa159f9ce12f..a3ff21b2f69f 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -11,7 +11,6 @@ #include "g4x_dp.h" #include "i915_reg.h" -#include "i915_utils.h" #include "intel_audio.h" #include "intel_backlight.h" #include "intel_connector.h" @@ -20,6 +19,7 @@ #include "intel_display_power.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_dp_link_training.h" diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 927fe56aec77..008d339d5c21 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -56,7 +56,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ - if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50)) + if (intel_de_wait_for_set_ms(display, IPS_CTL, IPS_ENABLE, 50)) drm_err(display->drm, "Timed out waiting for IPS enable\n"); } @@ -78,7 +78,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) * 42ms timeout value leads to occasional timeouts so use 100ms * instead. */ - if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100)) + if (intel_de_wait_for_clear_ms(display, IPS_CTL, IPS_ENABLE, 100)) drm_err(display->drm, "Timed out waiting for IPS disable\n"); } else { @@ -191,45 +191,46 @@ bool hsw_crtc_supports_ips(struct intel_crtc *crtc) static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - /* IPS only exists on ULT machines and is tied to pipe A. */ if (!hsw_crtc_supports_ips(crtc)) return false; - if (!display->params.enable_ips) - return false; - if (crtc_state->pipe_bpp > 24) return false; - /* - * We compare against max which means we must take - * the increased cdclk requirement into account when - * calculating the new cdclk. - * - * Should measure whether using a lower cdclk w/o IPS - */ - if (display->platform.broadwell && - crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100) - return false; - return true; } +static int _hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (display->platform.broadwell) + return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95); + + /* no IPS specific limits to worry about */ + return 0; +} + int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); + int min_cdclk; - if (!display->platform.broadwell) + if (!hsw_crtc_state_ips_capable(crtc_state)) return 0; - if (!hsw_crtc_state_ips_capable(crtc_state)) + min_cdclk = _hsw_ips_min_cdclk(crtc_state); + + /* + * Do not ask for more than the max CDCLK frequency, + * if that is not enough IPS will simply not be used. 
+ */ + if (min_cdclk > display->cdclk.max_cdclk_freq) return 0; - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95); + return min_cdclk; } int hsw_ips_compute_config(struct intel_atomic_state *state, @@ -244,6 +245,12 @@ int hsw_ips_compute_config(struct intel_atomic_state *state, if (!hsw_crtc_state_ips_capable(crtc_state)) return 0; + if (_hsw_ips_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq) + return 0; + + if (!display->params.enable_ips) + return 0; + /* * When IPS gets enabled, the pipe CRC changes. Since IPS gets * enabled and disabled dynamically based on package C states, @@ -257,18 +264,6 @@ int hsw_ips_compute_config(struct intel_atomic_state *state, if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) return 0; - if (display->platform.broadwell) { - const struct intel_cdclk_state *cdclk_state; - - cdclk_state = intel_atomic_get_cdclk_state(state); - if (IS_ERR(cdclk_state)) - return PTR_ERR(cdclk_state); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (crtc_state->pixel_rate > intel_cdclk_logical(cdclk_state) * 95 / 100) - return 0; - } - crtc_state->ips_enabled = true; return 0; diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 407deb5dfb57..51ccc6bd5f21 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -11,7 +11,6 @@ #include <drm/drm_print.h> #include "i915_reg.h" -#include "i915_utils.h" #include "i9xx_plane.h" #include "i9xx_plane_regs.h" #include "intel_atomic.h" @@ -19,6 +18,7 @@ #include "intel_display_irq.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_fb.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" @@ -754,10 +754,9 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, static unsigned int hsw_primary_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { - const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */ @@ -766,10 +765,9 @@ hsw_primary_max_stride(struct intel_plane *plane, static unsigned int ilk_primary_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { - const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */ @@ -781,10 +779,9 @@ ilk_primary_max_stride(struct intel_plane *plane, unsigned int i965_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { - const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. 
*/ @@ -796,8 +793,8 @@ i965_plane_max_stride(struct intel_plane *plane, static unsigned int i915_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { if (modifier == I915_FORMAT_MOD_X_TILED) return 8 * 1024; @@ -807,8 +804,8 @@ i915_plane_max_stride(struct intel_plane *plane, static unsigned int i8xx_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { if (plane->i9xx_plane == PLANE_C) return 4 * 1024; @@ -1191,10 +1188,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, val = intel_de_read(display, DSPCNTR(display, i9xx_plane)); if (DISPLAY_VER(display) >= 4) { - if (val & DISP_TILED) { - plane_config->tiling = I915_TILING_X; + if (val & DISP_TILED) fb->modifier = I915_FORMAT_MOD_X_TILED; - } if (val & DISP_ROTATE_180) plane_config->rotation = DRM_MODE_ROTATE_180; @@ -1206,14 +1201,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, pixel_format = val & DISP_FORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); - fb->format = drm_format_info(fourcc); + + fb->format = drm_get_format_info(display->drm, fourcc, fb->modifier); if (display->platform.haswell || display->platform.broadwell) { offset = intel_de_read(display, DSPOFFSET(display, i9xx_plane)); base = intel_de_read(display, DSPSURF(display, i9xx_plane)) & DISP_ADDR_MASK; } else if (DISPLAY_VER(display) >= 4) { - if (plane_config->tiling) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) offset = intel_de_read(display, DSPTILEOFF(display, i9xx_plane)); else diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h index 565dab751301..ec78bf4dd35e 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.h +++ b/drivers/gpu/drm/i915/display/i9xx_plane.h @@ -9,6 +9,7 @@ #include <linux/types.h> enum pipe; +struct drm_format_info; struct drm_framebuffer; struct intel_crtc; struct intel_display; @@ -18,8 +19,8 @@ struct intel_plane_state; #ifdef I915 unsigned int i965_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation); + const struct drm_format_info *info, + u64 modifier, unsigned int rotation); unsigned int vlv_plane_min_alignment(struct intel_plane *plane, const struct drm_framebuffer *fb, int colot_plane); diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index fd3b7b35f351..01f3803fa09f 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -5,6 +5,8 @@ #include <linux/iopoll.h> +#include <drm/drm_print.h> + #include "soc/intel_dram.h" #include "i915_drv.h" @@ -2295,12 +2297,11 @@ static void i9xx_update_wm(struct intel_display *display) crtc = single_enabled_crtc(display); if (display->platform.i915gm && crtc) { - struct drm_gem_object *obj; - - obj = intel_fb_bo(crtc->base.primary->state->fb); + const struct drm_framebuffer *fb = + crtc->base.primary->state->fb; /* self-refresh seems busted with untiled */ - if (!intel_bo_is_tiled(obj)) + if (fb->modifier == DRM_FORMAT_MOD_LINEAR) crtc = NULL; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 37faa8f19f6e..9230792960f2 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -35,7 +35,6 @@ #include <drm/drm_probe_helper.h> #include "i915_reg.h" 
-#include "i915_utils.h" #include "icl_dsi.h" #include "icl_dsi_regs.h" #include "intel_atomic.h" @@ -48,6 +47,7 @@ #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_regs.h" +#include "intel_display_utils.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "intel_panel.h" @@ -148,9 +148,9 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans), - LPTX_IN_PROGRESS, 0, - 20, 0, NULL); + ret = intel_de_wait_for_clear_us(display, + DSI_LP_MSG(dsi_trans), + LPTX_IN_PROGRESS, 20); if (ret) drm_err(display->drm, "LPTX bit not cleared\n"); } @@ -534,9 +534,8 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); - ret = intel_de_wait_custom(display, DDI_BUF_CTL(port), - DDI_BUF_IS_IDLE, 0, - 500, 0, NULL); + ret = intel_de_wait_for_clear_us(display, DDI_BUF_CTL(port), + DDI_BUF_IS_IDLE, 500); if (ret) drm_err(display->drm, "DDI port:%c buffer idle\n", port_name(port)); @@ -857,9 +856,9 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); - ret = intel_de_wait_custom(display, DSI_TRANS_FUNC_CONF(dsi_trans), - LINK_READY, LINK_READY, - 2500, 0, NULL); + ret = intel_de_wait_for_set_us(display, + DSI_TRANS_FUNC_CONF(dsi_trans), + LINK_READY, 2500); if (ret) drm_err(display->drm, "DSI link not ready\n"); } @@ -1048,8 +1047,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) TRANSCONF_ENABLE); /* wait for transcoder to be enabled */ - if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans), - TRANSCONF_STATE_ENABLE, 10)) + if (intel_de_wait_for_set_ms(display, TRANSCONF(display, dsi_trans), + TRANSCONF_STATE_ENABLE, 10)) drm_err(display->drm, "DSI transcoder not enabled\n"); } @@ -1317,8 +1316,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) TRANSCONF_ENABLE, 0); /* wait for transcoder to be disabled */ - if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans), - TRANSCONF_STATE_ENABLE, 50)) + if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, dsi_trans), + TRANSCONF_STATE_ENABLE, 50)) drm_err(display->drm, "DSI trancoder not disabled\n"); } @@ -1358,9 +1357,8 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) tmp &= ~LINK_ULPS_TYPE_LP11; intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp); - ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans), - LINK_IN_ULPS, LINK_IN_ULPS, - 10, 0, NULL); + ret = intel_de_wait_for_set_us(display, DSI_LP_MSG(dsi_trans), + LINK_IN_ULPS, 10); if (ret) drm_err(display->drm, "DSI link not in ULPS\n"); } @@ -1395,9 +1393,8 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); - ret = intel_de_wait_custom(display, DDI_BUF_CTL(port), - DDI_BUF_IS_IDLE, DDI_BUF_IS_IDLE, - 8, 0, NULL); + ret = intel_de_wait_for_set_us(display, DDI_BUF_CTL(port), + DDI_BUF_IS_IDLE, 8); if (ret) drm_err(display->drm, @@ -1655,7 +1652,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, if (ret) return ret; - crtc_state->dsc.compression_enable = true; + intel_dsc_enable_on_crtc(crtc_state); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c 
b/drivers/gpu/drm/i915/display/intel_acpi.c index 1addd6288241..68c01932f7b4 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -11,10 +11,10 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_acpi.h" #include "intel_display_core.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c index ed7a7ed486b5..6372f533f65b 100644 --- a/drivers/gpu/drm/i915/display/intel_alpm.c +++ b/drivers/gpu/drm/i915/display/intel_alpm.c @@ -49,7 +49,7 @@ void intel_alpm_init(struct intel_dp *intel_dp) return; intel_dp->alpm_dpcd = dpcd; - mutex_init(&intel_dp->alpm_parameters.lock); + mutex_init(&intel_dp->alpm.lock); } static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state) @@ -58,43 +58,32 @@ static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state) 1000 / 1000; } -static int get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state, - int *min, int *max) +static void get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state, + int *min, int *max) { if (crtc_state->port_clock < 540000) { *min = 65 * LFPS_CYCLE_COUNT; *max = 75 * LFPS_CYCLE_COUNT; - } else if (crtc_state->port_clock <= 810000) { + } else { *min = 140; *max = 800; - } else { - *min = *max = -1; - return -1; } - - return 0; } static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state) { - int tlfps_cycle_min, tlfps_cycle_max, ret; + int tlfps_cycle_min, tlfps_cycle_max; - ret = get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min, - &tlfps_cycle_max); - if (ret) - return ret; + get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min, + &tlfps_cycle_max); return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2; } static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state) { - int lfps_cycle_time = get_lfps_cycle_time(crtc_state); - - if (lfps_cycle_time < 0) - return -1; - - return lfps_cycle_time * crtc_state->port_clock / 1000 / 1000 / (2 * LFPS_CYCLE_COUNT); + return get_lfps_cycle_time(crtc_state) * crtc_state->port_clock / 1000 / + 1000 / (2 * LFPS_CYCLE_COUNT); } /* @@ -133,7 +122,7 @@ static int _lnl_compute_aux_less_wake_time(const struct intel_crtc_state *crtc_s static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) + struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(intel_dp); int aux_less_wake_time, aux_less_wake_lines, silence_period, @@ -146,8 +135,6 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, silence_period = get_silence_period_symbols(crtc_state); lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state); - if (lfps_half_cycle < 0) - return false; if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK || silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK || @@ -157,15 +144,15 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, if (display->params.psr_safest_params) aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK; - intel_dp->alpm_parameters.aux_less_wake_lines = aux_less_wake_lines; - intel_dp->alpm_parameters.silence_period_sym_clocks = silence_period; - intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle; + crtc_state->alpm_state.aux_less_wake_lines = 
aux_less_wake_lines; + crtc_state->alpm_state.silence_period_sym_clocks = silence_period; + crtc_state->alpm_state.lfps_half_cycle_num_of_syms = lfps_half_cycle; return true; } static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) + struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(intel_dp); int check_entry_lines; @@ -186,7 +173,7 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp, if (display->params.psr_safest_params) check_entry_lines = 15; - intel_dp->alpm_parameters.check_entry_lines = check_entry_lines; + crtc_state->alpm_state.check_entry_lines = check_entry_lines; return true; } @@ -217,7 +204,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state) } bool intel_alpm_compute_params(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) + struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(intel_dp); int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; @@ -255,8 +242,8 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp, io_wake_lines = fast_wake_lines = max_wake_lines; /* According to Bspec lower limit should be set as 7 lines. */ - intel_dp->alpm_parameters.io_wake_lines = max(io_wake_lines, 7); - intel_dp->alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7); + crtc_state->alpm_state.io_wake_lines = max(io_wake_lines, 7); + crtc_state->alpm_state.fast_wake_lines = max(fast_wake_lines, 7); return true; } @@ -270,12 +257,12 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp, int waketime_in_lines, first_sdp_position; int context_latency, guardband; - if (intel_dp->alpm_parameters.lobf_disable_debug) { + if (intel_dp->alpm.lobf_disable_debug) { drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n"); return; } - if (intel_dp->alpm_parameters.sink_alpm_error) + if (intel_dp->alpm.sink_alpm_error) return; if (!intel_dp_is_edp(intel_dp)) @@ -306,9 +293,9 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp, adjusted_mode->crtc_vdisplay - context_latency; first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start; if (intel_alpm_aux_less_wake_supported(intel_dp)) - waketime_in_lines = intel_dp->alpm_parameters.io_wake_lines; + waketime_in_lines = crtc_state->alpm_state.io_wake_lines; else - waketime_in_lines = intel_dp->alpm_parameters.aux_less_wake_lines; + waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines; crtc_state->has_lobf = (context_latency + guardband) > (first_sdp_position + waketime_in_lines); @@ -325,7 +312,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, !crtc_state->has_lobf)) return; - mutex_lock(&intel_dp->alpm_parameters.lock); + mutex_lock(&intel_dp->alpm.lock); /* * Panel Replay on eDP is always using ALPM aux less. I.e. no need to * check panel support at this point. 
@@ -334,7 +321,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, alpm_ctl = ALPM_CTL_ALPM_ENABLE | ALPM_CTL_ALPM_AUX_LESS_ENABLE | ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS | - ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines); + ALPM_CTL_AUX_LESS_WAKE_TIME(crtc_state->alpm_state.aux_less_wake_lines); if (intel_dp->as_sdp_supported) { u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1; @@ -352,7 +339,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, } else { alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE | - ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines); + ALPM_CTL_EXTENDED_FAST_WAKE_TIME(crtc_state->alpm_state.fast_wake_lines); } if (crtc_state->has_lobf) { @@ -360,17 +347,17 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n"); } - alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines); + alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(crtc_state->alpm_state.check_entry_lines); intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl); - mutex_unlock(&intel_dp->alpm_parameters.lock); + mutex_unlock(&intel_dp->alpm.lock); } void intel_alpm_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { lnl_alpm_configure(intel_dp, crtc_state); - intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder; + intel_dp->alpm.transcoder = crtc_state->cpu_transcoder; } void intel_alpm_port_configure(struct intel_dp *intel_dp, @@ -388,14 +375,14 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp, PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) | PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) | PORT_ALPM_CTL_SILENCE_PERIOD( - intel_dp->alpm_parameters.silence_period_sym_clocks); + crtc_state->alpm_state.silence_period_sym_clocks); lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(LFPS_CYCLE_COUNT) | PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION( - intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) | + crtc_state->alpm_state.lfps_half_cycle_num_of_syms) | PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION( - intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) | + crtc_state->alpm_state.lfps_half_cycle_num_of_syms) | PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION( - intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms); + crtc_state->alpm_state.lfps_half_cycle_num_of_syms); } intel_de_write(display, PORT_ALPM_CTL(port), alpm_ctl_val); @@ -433,10 +420,10 @@ void intel_alpm_pre_plane_update(struct intel_atomic_state *state, continue; if (old_crtc_state->has_lobf) { - mutex_lock(&intel_dp->alpm_parameters.lock); + mutex_lock(&intel_dp->alpm.lock); intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0); drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n"); - mutex_unlock(&intel_dp->alpm_parameters.lock); + mutex_unlock(&intel_dp->alpm.lock); } } } @@ -530,7 +517,7 @@ i915_edp_lobf_debug_get(void *data, u64 *val) struct intel_connector *connector = data; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - *val = intel_dp->alpm_parameters.lobf_disable_debug; + *val = intel_dp->alpm.lobf_disable_debug; return 0; } @@ -541,7 +528,7 @@ i915_edp_lobf_debug_set(void *data, u64 val) struct intel_connector *connector = data; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - intel_dp->alpm_parameters.lobf_disable_debug = val; + intel_dp->alpm.lobf_disable_debug = val; return 0; } @@ -569,12 +556,12 @@ void intel_alpm_lobf_debugfs_add(struct 
intel_connector *connector) void intel_alpm_disable(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - enum transcoder cpu_transcoder = intel_dp->alpm_parameters.transcoder; + enum transcoder cpu_transcoder = intel_dp->alpm.transcoder; if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd) return; - mutex_lock(&intel_dp->alpm_parameters.lock); + mutex_lock(&intel_dp->alpm.lock); intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder), ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE | @@ -585,7 +572,7 @@ void intel_alpm_disable(struct intel_dp *intel_dp) PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); drm_dbg_kms(display->drm, "Disabling ALPM\n"); - mutex_unlock(&intel_dp->alpm_parameters.lock); + mutex_unlock(&intel_dp->alpm.lock); } bool intel_alpm_get_error(struct intel_dp *intel_dp) diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h index a861c20b5d79..53599b464dea 100644 --- a/drivers/gpu/drm/i915/display/intel_alpm.h +++ b/drivers/gpu/drm/i915/display/intel_alpm.h @@ -17,7 +17,7 @@ struct intel_crtc; void intel_alpm_init(struct intel_dp *intel_dp); bool intel_alpm_compute_params(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); + struct intel_crtc_state *crtc_state); void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 3b14f929825a..a68fdbd2acb9 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -13,7 +13,6 @@ #include <drm/drm_print.h> #include "i915_reg.h" -#include "i915_utils.h" #include "intel_backlight.h" #include "intel_backlight_regs.h" #include "intel_connector.h" @@ -21,6 +20,7 @@ #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp_aux_backlight.h" #include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 3596dce84c28..4b41068e9e35 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -32,15 +32,15 @@ #include <drm/display/drm_dsc_helper.h> #include <drm/drm_edid.h> #include <drm/drm_fixed.h> +#include <drm/drm_print.h> #include "soc/intel_rom.h" -#include "i915_drv.h" -#include "i915_utils.h" #include "intel_display.h" #include "intel_display_core.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_gmbus.h" #define _INTEL_BIOS_PRIVATE @@ -3144,7 +3144,6 @@ err_free_rom: static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display, size_t *sizep) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct vbt_header *vbt = NULL; vbt = firmware_get_vbt(display, sizep); @@ -3158,11 +3157,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display */ if (!vbt && display->platform.dgfx) with_intel_display_rpm(display) - vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash"); + vbt = oprom_get_vbt(display, intel_rom_spi(display->drm), sizep, "SPI flash"); if (!vbt) with_intel_display_rpm(display) - vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM"); + vbt = oprom_get_vbt(display, intel_rom_pci(display->drm), 
sizep, "PCI ROM"); return vbt; } diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c index 6ae1374d5c2b..f3687eb63467 100644 --- a/drivers/gpu/drm/i915/display/intel_bo.c +++ b/drivers/gpu/drm/i915/display/intel_bo.c @@ -29,11 +29,6 @@ bool intel_bo_is_protected(struct drm_gem_object *obj) return i915_gem_object_is_protected(to_intel_bo(obj)); } -void intel_bo_flush_if_display(struct drm_gem_object *obj) -{ - i915_gem_object_flush_if_display(to_intel_bo(obj)); -} - int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { return i915_gem_fb_mmap(to_intel_bo(obj), vma); @@ -44,15 +39,40 @@ int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, i return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size); } -struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj) +struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *_obj) +{ + struct drm_i915_gem_object *obj = to_intel_bo(_obj); + struct i915_frontbuffer *front; + + front = i915_gem_object_frontbuffer_get(obj); + if (!front) + return NULL; + + return &front->base; +} + +void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front) { - return i915_gem_object_get_frontbuffer(to_intel_bo(obj)); + struct i915_frontbuffer *front = + container_of(_front, typeof(*front), base); + + i915_gem_object_frontbuffer_ref(front); } -struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, - struct intel_frontbuffer *front) +void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front) { - return i915_gem_object_set_frontbuffer(to_intel_bo(obj), front); + struct i915_frontbuffer *front = + container_of(_front, typeof(*front), base); + + return i915_gem_object_frontbuffer_put(front); +} + +void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *_front) +{ + struct i915_frontbuffer *front = + container_of(_front, typeof(*front), base); + + i915_gem_object_flush_if_display(front->obj); } void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h index 48d87019e48a..fc05f680dc76 100644 --- a/drivers/gpu/drm/i915/display/intel_bo.h +++ b/drivers/gpu/drm/i915/display/intel_bo.h @@ -16,13 +16,13 @@ bool intel_bo_is_tiled(struct drm_gem_object *obj); bool intel_bo_is_userptr(struct drm_gem_object *obj); bool intel_bo_is_shmem(struct drm_gem_object *obj); bool intel_bo_is_protected(struct drm_gem_object *obj); -void intel_bo_flush_if_display(struct drm_gem_object *obj); int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size); -struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj); -struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, - struct intel_frontbuffer *front); +struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj); +void intel_bo_frontbuffer_ref(struct intel_frontbuffer *front); +void intel_bo_frontbuffer_put(struct intel_frontbuffer *front); +void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front); void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index ac6da20d9529..1f6461be50ef 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ 
b/drivers/gpu/drm/i915/display/intel_bw.c @@ -4,31 +4,25 @@ */ #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_print.h> #include "soc/intel_dram.h" #include "i915_drv.h" #include "i915_reg.h" -#include "i915_utils.h" -#include "intel_atomic.h" #include "intel_bw.h" -#include "intel_cdclk.h" +#include "intel_crtc.h" #include "intel_display_core.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" #include "intel_uncore.h" #include "skl_watermark.h" -struct intel_dbuf_bw { - unsigned int max_bw[I915_MAX_DBUF_SLICES]; - u8 active_planes[I915_MAX_DBUF_SLICES]; -}; - struct intel_bw_state { struct intel_global_state base; - struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES]; /* * Contains a bit mask, used to determine, whether correspondent @@ -811,72 +805,40 @@ void intel_bw_init_hw(struct intel_display *display) if (!HAS_DISPLAY(display)) return; - if (DISPLAY_VERx100(display) >= 3002) - tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info); - else if (DISPLAY_VER(display) >= 30) - tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info); - else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx && - dram_info->type == INTEL_DRAM_GDDR_ECC) - xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info); - else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) - xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info); - else if (DISPLAY_VER(display) >= 14) + /* + * Starting with Xe3p_LPD, the hardware tells us whether memory has ECC + * enabled that would impact display bandwidth. However, so far there + * are no instructions in Bspec on how to handle that case. Let's + * complain if we ever find such a scenario. + */ + if (DISPLAY_VER(display) >= 35) + drm_WARN_ON(display->drm, dram_info->ecc_impacting_de_bw); + + if (DISPLAY_VER(display) >= 30) { + if (DISPLAY_VERx100(display) == 3002) + tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info); + else + tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info); + } else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) { + if (dram_info->type == INTEL_DRAM_GDDR_ECC) + xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info); + else + xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info); + } else if (DISPLAY_VER(display) >= 14) { tgl_get_bw_info(display, dram_info, &mtl_sa_info); - else if (display->platform.dg2) + } else if (display->platform.dg2) { dg2_get_bw_info(display); - else if (display->platform.alderlake_p) + } else if (display->platform.alderlake_p) { tgl_get_bw_info(display, dram_info, &adlp_sa_info); - else if (display->platform.alderlake_s) + } else if (display->platform.alderlake_s) { tgl_get_bw_info(display, dram_info, &adls_sa_info); - else if (display->platform.rocketlake) + } else if (display->platform.rocketlake) { tgl_get_bw_info(display, dram_info, &rkl_sa_info); - else if (DISPLAY_VER(display) == 12) + } else if (DISPLAY_VER(display) == 12) { tgl_get_bw_info(display, dram_info, &tgl_sa_info); - else if (DISPLAY_VER(display) == 11) + } else if (DISPLAY_VER(display) == 11) { icl_get_bw_info(display, dram_info, &icl_sa_info); -} - -static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state) -{ - /* - * We assume cursors are small enough - * to not not cause bandwidth problems. 
- */ - return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); -} - -static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state) -{ - struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - unsigned int data_rate = 0; - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) { - /* - * We assume cursors are small enough - * to not not cause bandwidth problems. - */ - if (plane_id == PLANE_CURSOR) - continue; - - data_rate += crtc_state->data_rate[plane_id]; - - if (DISPLAY_VER(display) < 11) - data_rate += crtc_state->data_rate_y[plane_id]; } - - return data_rate; -} - -/* "Maximum Pipe Read Bandwidth" */ -static int intel_bw_crtc_min_cdclk(struct intel_display *display, - unsigned int data_rate) -{ - if (DISPLAY_VER(display) < 12) - return 0; - - return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512); } static unsigned int intel_bw_num_active_planes(struct intel_display *display, @@ -894,14 +856,13 @@ static unsigned int intel_bw_num_active_planes(struct intel_display *display, static unsigned int intel_bw_data_rate(struct intel_display *display, const struct intel_bw_state *bw_state) { - struct drm_i915_private *i915 = to_i915(display->drm); unsigned int data_rate = 0; enum pipe pipe; for_each_pipe(display, pipe) data_rate += bw_state->data_rate[pipe]; - if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915)) + if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display)) data_rate = DIV_ROUND_UP(data_rate * 105, 100); return data_rate; @@ -1262,223 +1223,6 @@ static int intel_bw_check_qgv_points(struct intel_display *display, old_bw_state, new_bw_state); } -static bool intel_dbuf_bw_changed(struct intel_display *display, - const struct intel_dbuf_bw *old_dbuf_bw, - const struct intel_dbuf_bw *new_dbuf_bw) -{ - enum dbuf_slice slice; - - for_each_dbuf_slice(display, slice) { - if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] || - old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice]) - return true; - } - - return false; -} - -static bool intel_bw_state_changed(struct intel_display *display, - const struct intel_bw_state *old_bw_state, - const struct intel_bw_state *new_bw_state) -{ - enum pipe pipe; - - for_each_pipe(display, pipe) { - const struct intel_dbuf_bw *old_dbuf_bw = - &old_bw_state->dbuf_bw[pipe]; - const struct intel_dbuf_bw *new_dbuf_bw = - &new_bw_state->dbuf_bw[pipe]; - - if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw)) - return true; - - if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) != - intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe])) - return true; - } - - return false; -} - -static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw, - struct intel_crtc *crtc, - enum plane_id plane_id, - const struct skl_ddb_entry *ddb, - unsigned int data_rate) -{ - struct intel_display *display = to_intel_display(crtc); - unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb); - enum dbuf_slice slice; - - /* - * The arbiter can only really guarantee an - * equal share of the total bw to each plane. 
- */ - for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) { - dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate); - dbuf_bw->active_planes[slice] |= BIT(plane_id); - } -} - -static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw, - const struct intel_crtc_state *crtc_state) -{ - struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum plane_id plane_id; - - memset(dbuf_bw, 0, sizeof(*dbuf_bw)); - - if (!crtc_state->hw.active) - return; - - for_each_plane_id_on_crtc(crtc, plane_id) { - /* - * We assume cursors are small enough - * to not cause bandwidth problems. - */ - if (plane_id == PLANE_CURSOR) - continue; - - skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id, - &crtc_state->wm.skl.plane_ddb[plane_id], - crtc_state->data_rate[plane_id]); - - if (DISPLAY_VER(display) < 11) - skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id, - &crtc_state->wm.skl.plane_ddb_y[plane_id], - crtc_state->data_rate[plane_id]); - } -} - -/* "Maximum Data Buffer Bandwidth" */ -static int -intel_bw_dbuf_min_cdclk(struct intel_display *display, - const struct intel_bw_state *bw_state) -{ - unsigned int total_max_bw = 0; - enum dbuf_slice slice; - - for_each_dbuf_slice(display, slice) { - int num_active_planes = 0; - unsigned int max_bw = 0; - enum pipe pipe; - - /* - * The arbiter can only really guarantee an - * equal share of the total bw to each plane. - */ - for_each_pipe(display, pipe) { - const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe]; - - max_bw = max(dbuf_bw->max_bw[slice], max_bw); - num_active_planes += hweight8(dbuf_bw->active_planes[slice]); - } - max_bw *= num_active_planes; - - total_max_bw = max(total_max_bw, max_bw); - } - - return DIV_ROUND_UP(total_max_bw, 64); -} - -int intel_bw_min_cdclk(struct intel_display *display, - const struct intel_bw_state *bw_state) -{ - enum pipe pipe; - int min_cdclk; - - min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state); - - for_each_pipe(display, pipe) - min_cdclk = max(min_cdclk, - intel_bw_crtc_min_cdclk(display, - bw_state->data_rate[pipe])); - - return min_cdclk; -} - -int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, - bool *need_cdclk_calc) -{ - struct intel_display *display = to_intel_display(state); - struct intel_bw_state *new_bw_state = NULL; - const struct intel_bw_state *old_bw_state = NULL; - const struct intel_cdclk_state *cdclk_state; - const struct intel_crtc_state *old_crtc_state; - const struct intel_crtc_state *new_crtc_state; - int old_min_cdclk, new_min_cdclk; - struct intel_crtc *crtc; - int i; - - if (DISPLAY_VER(display) < 9) - return 0; - - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw; - - skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state); - skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state); - - if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw)) - continue; - - new_bw_state = intel_atomic_get_bw_state(state); - if (IS_ERR(new_bw_state)) - return PTR_ERR(new_bw_state); - - old_bw_state = intel_atomic_get_old_bw_state(state); - - new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw; - } - - if (!old_bw_state) - return 0; - - if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) { - int ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - - old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state); - new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state); - - /* 
- * No need to check against the cdclk state if - * the min cdclk doesn't increase. - * - * Ie. we only ever increase the cdclk due to bandwidth - * requirements. This can reduce back and forth - * display blinking due to constant cdclk changes. - */ - if (new_min_cdclk <= old_min_cdclk) - return 0; - - cdclk_state = intel_atomic_get_cdclk_state(state); - if (IS_ERR(cdclk_state)) - return PTR_ERR(cdclk_state); - - /* - * No need to recalculate the cdclk state if - * the min cdclk doesn't increase. - * - * Ie. we only ever increase the cdclk due to bandwidth - * requirements. This can reduce back and forth - * display blinking due to constant cdclk changes. - */ - if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state)) - return 0; - - drm_dbg_kms(display->drm, - "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n", - new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state)); - *need_cdclk_calc = true; - - return 0; -} - static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed) { struct intel_display *display = to_intel_display(state); @@ -1489,13 +1233,13 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { unsigned int old_data_rate = - intel_bw_crtc_data_rate(old_crtc_state); + intel_crtc_bw_data_rate(old_crtc_state); unsigned int new_data_rate = - intel_bw_crtc_data_rate(new_crtc_state); + intel_crtc_bw_data_rate(new_crtc_state); unsigned int old_active_planes = - intel_bw_crtc_num_active_planes(old_crtc_state); + intel_crtc_bw_num_active_planes(old_crtc_state); unsigned int new_active_planes = - intel_bw_crtc_num_active_planes(new_crtc_state); + intel_crtc_bw_num_active_planes(new_crtc_state); struct intel_bw_state *new_bw_state; /* @@ -1527,11 +1271,11 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan static int intel_bw_modeset_checks(struct intel_atomic_state *state) { - struct intel_display *display = to_intel_display(state); const struct intel_bw_state *old_bw_state; struct intel_bw_state *new_bw_state; + int ret; - if (DISPLAY_VER(display) < 9) + if (!intel_any_crtc_active_changed(state)) return 0; new_bw_state = intel_atomic_get_bw_state(state); @@ -1543,13 +1287,9 @@ static int intel_bw_modeset_checks(struct intel_atomic_state *state) new_bw_state->active_pipes = intel_calc_active_pipes(state, old_bw_state->active_pipes); - if (new_bw_state->active_pipes != old_bw_state->active_pipes) { - int ret; - - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; return 0; } @@ -1599,7 +1339,7 @@ static int intel_bw_check_sagv_mask(struct intel_atomic_state *state) return 0; } -int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms) +int intel_bw_atomic_check(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); bool changed = false; @@ -1610,11 +1350,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms) if (DISPLAY_VER(display) < 9) return 0; - if (any_ms) { - ret = intel_bw_modeset_checks(state); - if (ret) - return ret; - } + ret = intel_bw_modeset_checks(state); + if (ret) + return ret; ret = intel_bw_check_sagv_mask(state); if (ret) @@ -1657,9 +1395,9 @@ static void intel_bw_crtc_update(struct intel_bw_state *bw_state, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 
bw_state->data_rate[crtc->pipe] = - intel_bw_crtc_data_rate(crtc_state); + intel_crtc_bw_data_rate(crtc_state); bw_state->num_active_planes[crtc->pipe] = - intel_bw_crtc_num_active_planes(crtc_state); + intel_crtc_bw_num_active_planes(crtc_state); drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), @@ -1690,8 +1428,6 @@ void intel_bw_update_hw_state(struct intel_display *display) if (DISPLAY_VER(display) >= 11) intel_bw_crtc_update(bw_state, crtc_state); - skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state); - /* initially SAGV has been forced off */ bw_state->pipe_sagv_reject |= BIT(pipe); } @@ -1709,7 +1445,6 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc) bw_state->data_rate[pipe] = 0; bw_state->num_active_planes[pipe] = 0; - memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe])); } static struct intel_global_state * diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index d51f50c9d302..99b447388245 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -28,11 +28,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state); void intel_bw_init_hw(struct intel_display *display); int intel_bw_init(struct intel_display *display); -int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms); -int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, - bool *need_cdclk_calc); -int intel_bw_min_cdclk(struct intel_display *display, - const struct intel_bw_state *bw_state); +int intel_bw_atomic_check(struct intel_atomic_state *state); void intel_bw_update_hw_state(struct intel_display *display); void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/display/intel_casf.c b/drivers/gpu/drm/i915/display/intel_casf.c new file mode 100644 index 000000000000..95339b496f24 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_casf.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include <drm/drm_print.h> + +#include "i915_reg.h" +#include "intel_casf.h" +#include "intel_casf_regs.h" +#include "intel_de.h" +#include "intel_display_regs.h" +#include "intel_display_types.h" +#include "skl_scaler.h" + +#define MAX_PIXELS_FOR_3_TAP_FILTER (1920 * 1080) +#define MAX_PIXELS_FOR_5_TAP_FILTER (3840 * 2160) + +#define FILTER_COEFF_0_125 125 +#define FILTER_COEFF_0_25 250 +#define FILTER_COEFF_0_5 500 +#define FILTER_COEFF_1_0 1000 +#define FILTER_COEFF_0_0 0 +#define SET_POSITIVE_SIGN(x) ((x) & (~SIGN)) + +/** + * DOC: Content Adaptive Sharpness Filter (CASF) + * + * Starting from LNL the display engine supports an + * adaptive sharpening filter, enhancing the image + * quality. The display hardware utilizes the second + * pipe scaler for implementing CASF. + * If sharpness is being enabled then pipe scaling + * cannot be used. + * This filter operates on a region of pixels based + * on the tap size. Coefficients are used to generate + * an alpha value which blends the sharpened image to + * original image. + */ + +/* Default LUT values to be loaded one time. 
*/ +static const u16 sharpness_lut[] = { + 4095, 2047, 1364, 1022, 816, 678, 579, + 504, 444, 397, 357, 323, 293, 268, 244, 224, + 204, 187, 170, 154, 139, 125, 111, 98, 85, + 73, 60, 48, 36, 24, 12, 0 +}; + +const u16 filtercoeff_1[] = { + FILTER_COEFF_0_0, FILTER_COEFF_0_0, FILTER_COEFF_0_5, + FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_0, + FILTER_COEFF_0_0, +}; + +const u16 filtercoeff_2[] = { + FILTER_COEFF_0_0, FILTER_COEFF_0_25, FILTER_COEFF_0_5, + FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25, + FILTER_COEFF_0_0, +}; + +const u16 filtercoeff_3[] = { + FILTER_COEFF_0_125, FILTER_COEFF_0_25, FILTER_COEFF_0_5, + FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25, + FILTER_COEFF_0_125, +}; + +static void intel_casf_filter_lut_load(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + int i; + + intel_de_write(display, SHRPLUT_INDEX(crtc->pipe), + INDEX_AUTO_INCR | INDEX_VALUE(0)); + + for (i = 0; i < ARRAY_SIZE(sharpness_lut); i++) + intel_de_write(display, SHRPLUT_DATA(crtc->pipe), + sharpness_lut[i]); +} + +void intel_casf_update_strength(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + int win_size; + + intel_de_rmw(display, SHARPNESS_CTL(crtc->pipe), FILTER_STRENGTH_MASK, + FILTER_STRENGTH(crtc_state->hw.casf_params.strength)); + + win_size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, 1)); + + intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, 1), win_size); +} + +static void intel_casf_compute_win_size(struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *mode = &crtc_state->hw.adjusted_mode; + u32 total_pixels = mode->hdisplay * mode->vdisplay; + + if (total_pixels <= MAX_PIXELS_FOR_3_TAP_FILTER) + crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_3X3; + else if (total_pixels <= MAX_PIXELS_FOR_5_TAP_FILTER) + crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_5X5; + else + crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_7X7; +} + +int intel_casf_compute_config(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (!HAS_CASF(display)) + return 0; + + if (crtc_state->uapi.sharpness_strength == 0) { + crtc_state->hw.casf_params.casf_enable = false; + crtc_state->hw.casf_params.strength = 0; + return 0; + } + + crtc_state->hw.casf_params.casf_enable = true; + + /* + * HW takes a value in form (1.0 + strength) in 4.4 fixed format. + * Strength is from 0.0-14.9375 ie from 0-239. + * User can give value from 0-255 but is clamped to 239. + * Ex. User gives 85 which is 5.3125 and adding 1.0 gives 6.3125. + * 6.3125 in 4.4 format is b01100101 which is equal to 101. + * Also 85 + 16 = 101. 
+ */ + crtc_state->hw.casf_params.strength = + min(crtc_state->uapi.sharpness_strength, 0xEF) + 0x10; + + intel_casf_compute_win_size(crtc_state); + + intel_casf_scaler_compute_config(crtc_state); + + return 0; +} + +void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + u32 sharp; + + sharp = intel_de_read(display, SHARPNESS_CTL(crtc->pipe)); + if (sharp & FILTER_EN) { + if (drm_WARN_ON(display->drm, + REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp) < 16)) + crtc_state->hw.casf_params.strength = 0; + else + crtc_state->hw.casf_params.strength = + REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp); + crtc_state->hw.casf_params.casf_enable = true; + crtc_state->hw.casf_params.win_size = + REG_FIELD_GET(FILTER_SIZE_MASK, sharp); + } +} + +bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->hw.casf_params.casf_enable) + return true; + + return false; +} + +static int casf_coeff_tap(int i) +{ + return i % SCALER_FILTER_NUM_TAPS; +} + +static u32 casf_coeff(struct intel_crtc_state *crtc_state, int t) +{ + struct scaler_filter_coeff value; + u32 coeff; + + value = crtc_state->hw.casf_params.coeff[t]; + value.sign = 0; + + coeff = value.sign << 15 | value.exp << 12 | value.mantissa << 3; + return coeff; +} + +/* + * 17 phase of 7 taps requires 119 coefficients in 60 dwords per set. + * To enable casf: program scaler coefficients with the coeffients + * that are calculated and stored in hw.casf_params.coeff as per + * SCALER_COEFFICIENT_FORMAT + */ +static void intel_casf_write_coeff(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + int id = crtc_state->scaler_state.scaler_id; + int i; + + if (id != 1) { + drm_WARN(display->drm, 0, "Second scaler not enabled\n"); + return; + } + + intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(crtc->pipe, id, 0), + PS_COEF_INDEX_AUTO_INC); + + for (i = 0; i < 17 * SCALER_FILTER_NUM_TAPS; i += 2) { + u32 tmp; + int t; + + t = casf_coeff_tap(i); + tmp = casf_coeff(crtc_state, t); + + t = casf_coeff_tap(i + 1); + tmp |= casf_coeff(crtc_state, t) << 16; + + intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(crtc->pipe, id, 0), + tmp); + } +} + +static void convert_sharpness_coef_binary(struct scaler_filter_coeff *coeff, + u16 coefficient) +{ + if (coefficient < 25) { + coeff->mantissa = (coefficient * 2048) / 100; + coeff->exp = 3; + } else if (coefficient < 50) { + coeff->mantissa = (coefficient * 1024) / 100; + coeff->exp = 2; + } else if (coefficient < 100) { + coeff->mantissa = (coefficient * 512) / 100; + coeff->exp = 1; + } else { + coeff->mantissa = (coefficient * 256) / 100; + coeff->exp = 0; + } +} + +void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state) +{ + const u16 *filtercoeff; + u16 filter_coeff[SCALER_FILTER_NUM_TAPS]; + u16 sumcoeff = 0; + int i; + + if (crtc_state->hw.casf_params.win_size == 0) + filtercoeff = filtercoeff_1; + else if (crtc_state->hw.casf_params.win_size == 1) + filtercoeff = filtercoeff_2; + else + filtercoeff = filtercoeff_3; + + for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++) + sumcoeff += *(filtercoeff + i); + + for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++) { + filter_coeff[i] = (*(filtercoeff + i) * 100 / sumcoeff); + convert_sharpness_coef_binary(&crtc_state->hw.casf_params.coeff[i], + filter_coeff[i]); + } +} + 
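[Editor's note: illustration only, not part of the patch above. The comment in intel_casf_compute_config() describes the 4.4 fixed-point strength encoding (uapi value 0-255, clamped to 239, plus 16 so the hardware sees 1.0 + strength). The standalone sketch below just walks through that arithmetic; the function and variable names are made up for the example and do not exist in the driver.]

	#include <stdio.h>

	/* Encode a uapi sharpness value the way the comment above describes. */
	static unsigned int casf_encode_strength(unsigned int user_val)
	{
		/* clamp to 239 (14.9375 in 4.4 fixed point) */
		if (user_val > 0xEF)
			user_val = 0xEF;

		/* add 1.0 (16 in 4.4 fixed point): hardware wants 1.0 + strength */
		return user_val + 0x10;
	}

	int main(void)
	{
		/* 85 -> 5.3125, plus 1.0 -> 6.3125, i.e. 101 (0b0110_0101) in 4.4 */
		printf("user 85 -> hw %u\n", casf_encode_strength(85));
		/* 255 is clamped to 239 first, then encoded as 255 */
		printf("user 255 -> hw %u\n", casf_encode_strength(255));
		return 0;
	}
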
+void intel_casf_enable(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + u32 sharpness_ctl; + + intel_casf_filter_lut_load(crtc, crtc_state); + + intel_casf_write_coeff(crtc_state); + + sharpness_ctl = FILTER_EN | FILTER_STRENGTH(crtc_state->hw.casf_params.strength); + + sharpness_ctl |= crtc_state->hw.casf_params.win_size; + + intel_de_write(display, SHARPNESS_CTL(crtc->pipe), sharpness_ctl); + + skl_scaler_setup_casf(crtc_state); +} + +void intel_casf_disable(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + intel_de_write(display, SKL_PS_CTRL(crtc->pipe, 1), 0); + intel_de_write(display, SKL_PS_WIN_POS(crtc->pipe, 1), 0); + intel_de_write(display, SHARPNESS_CTL(crtc->pipe), 0); + intel_de_write(display, SKL_PS_WIN_SZ(crtc->pipe, 1), 0); +} diff --git a/drivers/gpu/drm/i915/display/intel_casf.h b/drivers/gpu/drm/i915/display/intel_casf.h new file mode 100644 index 000000000000..b3fb0bcb3f5b --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_casf.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef __INTEL_CASF_H__ +#define __INTEL_CASF_H__ + +#include <linux/types.h> + +struct intel_crtc_state; + +int intel_casf_compute_config(struct intel_crtc_state *crtc_state); +void intel_casf_update_strength(struct intel_crtc_state *new_crtc_state); +void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state); +void intel_casf_enable(struct intel_crtc_state *crtc_state); +void intel_casf_disable(const struct intel_crtc_state *crtc_state); +void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state); +bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state); + +#endif /* __INTEL_CASF_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_casf_regs.h b/drivers/gpu/drm/i915/display/intel_casf_regs.h new file mode 100644 index 000000000000..87803cca510f --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_casf_regs.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef __INTEL_CASF_REGS_H__ +#define __INTEL_CASF_REGS_H__ + +#include "intel_display_reg_defs.h" + +#define _SHARPNESS_CTL_A 0x682B0 +#define _SHARPNESS_CTL_B 0x68AB0 +#define SHARPNESS_CTL(pipe) _MMIO_PIPE(pipe, _SHARPNESS_CTL_A, _SHARPNESS_CTL_B) +#define FILTER_EN REG_BIT(31) +#define FILTER_STRENGTH_MASK REG_GENMASK(15, 8) +#define FILTER_STRENGTH(x) REG_FIELD_PREP(FILTER_STRENGTH_MASK, (x)) +#define FILTER_SIZE_MASK REG_GENMASK(1, 0) +#define SHARPNESS_FILTER_SIZE_3X3 REG_FIELD_PREP(FILTER_SIZE_MASK, 0) +#define SHARPNESS_FILTER_SIZE_5X5 REG_FIELD_PREP(FILTER_SIZE_MASK, 1) +#define SHARPNESS_FILTER_SIZE_7X7 REG_FIELD_PREP(FILTER_SIZE_MASK, 2) + +#define _SHRPLUT_DATA_A 0x682B8 +#define _SHRPLUT_DATA_B 0x68AB8 +#define SHRPLUT_DATA(pipe) _MMIO_PIPE(pipe, _SHRPLUT_DATA_A, _SHRPLUT_DATA_B) + +#define _SHRPLUT_INDEX_A 0x682B4 +#define _SHRPLUT_INDEX_B 0x68AB4 +#define SHRPLUT_INDEX(pipe) _MMIO_PIPE(pipe, _SHRPLUT_INDEX_A, _SHRPLUT_INDEX_B) +#define INDEX_AUTO_INCR REG_BIT(10) +#define INDEX_VALUE_MASK REG_GENMASK(4, 0) +#define INDEX_VALUE(x) REG_FIELD_PREP(INDEX_VALUE_MASK, (x)) + +#endif /* __INTEL_CASF_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 
9725eebe5706..37801c744b05 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -26,21 +26,22 @@ #include <linux/time.h> #include <drm/drm_fixed.h> +#include <drm/drm_print.h> #include "soc/intel_dram.h" #include "hsw_ips.h" #include "i915_drv.h" #include "i915_reg.h" -#include "i915_utils.h" #include "intel_atomic.h" #include "intel_audio.h" -#include "intel_bw.h" #include "intel_cdclk.h" #include "intel_crtc.h" +#include "intel_dbuf_bw.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_mchbar_regs.h" #include "intel_pci_config.h" #include "intel_pcode.h" @@ -49,6 +50,7 @@ #include "intel_vdsc.h" #include "skl_watermark.h" #include "skl_watermark_regs.h" +#include "vlv_clock.h" #include "vlv_dsi.h" #include "vlv_sideband.h" @@ -132,8 +134,8 @@ struct intel_cdclk_state { */ struct intel_cdclk_config actual; - /* minimum acceptable cdclk to satisfy bandwidth requirements */ - int bw_min_cdclk; + /* minimum acceptable cdclk to satisfy DBUF bandwidth requirements */ + int dbuf_bw_min_cdclk; /* minimum acceptable cdclk for each pipe */ int min_cdclk[I915_MAX_PIPES]; /* minimum acceptable voltage level for each pipe */ @@ -145,6 +147,9 @@ struct intel_cdclk_state { /* forced minimum cdclk for glk+ audio w/a */ int force_min_cdclk; + /* bitmask of enabled pipes */ + u8 enabled_pipes; + /* bitmask of active pipes */ u8 active_pipes; @@ -563,8 +568,7 @@ static void hsw_get_cdclk(struct intel_display *display, static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? + int freq_320 = (vlv_clock_get_hpll_vco(display->drm) << 1) % 320000 != 0 ? 333333 : 320000; /* @@ -584,8 +588,6 @@ static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk) static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (display->platform.valleyview) { if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ return 2; @@ -599,7 +601,7 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk) * hardware has shown that we just need to write the desired * CCK divider into the Punit register. 
*/ - return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; + return DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1, cdclk) - 1; } } @@ -608,17 +610,12 @@ static void vlv_get_cdclk(struct intel_display *display, { u32 val; - vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT)); - - cdclk_config->vco = vlv_get_hpll_vco(display->drm); - cdclk_config->cdclk = vlv_get_cck_clock(display->drm, "cdclk", - CCK_DISPLAY_CLOCK_CONTROL, - cdclk_config->vco); + cdclk_config->vco = vlv_clock_get_hpll_vco(display->drm); + cdclk_config->cdclk = vlv_clock_get_cdclk(display->drm); + vlv_punit_get(display->drm); val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM); - - vlv_iosf_sb_put(display->drm, - BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT)); + vlv_punit_put(display->drm); if (display->platform.valleyview) cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >> @@ -630,7 +627,6 @@ static void vlv_get_cdclk(struct intel_display *display, static void vlv_program_pfi_credits(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); unsigned int credits, default_credits; if (display->platform.cherryview) @@ -638,7 +634,7 @@ static void vlv_program_pfi_credits(struct intel_display *display) else default_credits = PFI_CREDIT(8); - if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) { + if (display->cdclk.hw.cdclk >= vlv_clock_get_czclk(display->drm)) { /* CHV suggested value is 31 or 63 */ if (display->platform.cherryview) credits = PFI_CREDIT_63; @@ -670,7 +666,6 @@ static void vlv_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; @@ -715,7 +710,7 @@ static void vlv_set_cdclk(struct intel_display *display, if (cdclk == 400000) { u32 divider; - divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, + divider = DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1, cdclk) - 1; /* adjust cdclk divider */ @@ -907,9 +902,8 @@ static void bdw_set_cdclk(struct intel_display *display, * According to the spec, it should be enough to poll for this 1 us. * However, extensive testing shows that this can take longer. 
*/ - ret = intel_de_wait_custom(display, LCPLL_CTL, - LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE, - 100, 0, NULL); + ret = intel_de_wait_for_set_us(display, LCPLL_CTL, + LCPLL_CD_SOURCE_FCLK_DONE, 100); if (ret) drm_err(display->drm, "Switching to FCLK failed\n"); @@ -919,9 +913,8 @@ static void bdw_set_cdclk(struct intel_display *display, intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); - ret = intel_de_wait_custom(display, LCPLL_CTL, - LCPLL_CD_SOURCE_FCLK_DONE, 0, - 1, 0, NULL); + ret = intel_de_wait_for_clear_us(display, LCPLL_CTL, + LCPLL_CD_SOURCE_FCLK_DONE, 1); if (ret) drm_err(display->drm, "Switching back to LCPLL failed\n"); @@ -1119,7 +1112,7 @@ static void skl_dpll0_enable(struct intel_display *display, int vco) intel_de_rmw(display, LCPLL1_CTL, 0, LCPLL_PLL_ENABLE); - if (intel_de_wait_for_set(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) + if (intel_de_wait_for_set_ms(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) drm_err(display->drm, "DPLL0 not locked\n"); display->cdclk.hw.vco = vco; @@ -1133,7 +1126,7 @@ static void skl_dpll0_disable(struct intel_display *display) intel_de_rmw(display, LCPLL1_CTL, LCPLL_PLL_ENABLE, 0); - if (intel_de_wait_for_clear(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) + if (intel_de_wait_for_clear_ms(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) drm_err(display->drm, "Couldn't disable DPLL0\n"); display->cdclk.hw.vco = 0; @@ -1540,6 +1533,41 @@ static const struct intel_cdclk_vals xe3lpd_cdclk_table[] = { {} }; +static const struct intel_cdclk_vals xe3p_lpd_cdclk_table[] = { + { .refclk = 38400, .cdclk = 151200, .ratio = 21, .waveform = 0xa4a4 }, + { .refclk = 38400, .cdclk = 176400, .ratio = 21, .waveform = 0xaa54 }, + { .refclk = 38400, .cdclk = 201600, .ratio = 21, .waveform = 0xaaaa }, + { .refclk = 38400, .cdclk = 226800, .ratio = 21, .waveform = 0xad5a }, + { .refclk = 38400, .cdclk = 252000, .ratio = 21, .waveform = 0xb6b6 }, + { .refclk = 38400, .cdclk = 277200, .ratio = 21, .waveform = 0xdbb6 }, + { .refclk = 38400, .cdclk = 302400, .ratio = 21, .waveform = 0xeeee }, + { .refclk = 38400, .cdclk = 327600, .ratio = 21, .waveform = 0xf7de }, + { .refclk = 38400, .cdclk = 352800, .ratio = 21, .waveform = 0xfefe }, + { .refclk = 38400, .cdclk = 378000, .ratio = 21, .waveform = 0xfffe }, + { .refclk = 38400, .cdclk = 403200, .ratio = 21, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 422400, .ratio = 22, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 441600, .ratio = 23, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 460800, .ratio = 24, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 499200, .ratio = 26, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 518400, .ratio = 27, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 537600, .ratio = 28, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 576000, .ratio = 30, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 595200, .ratio = 31, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 614400, .ratio = 32, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 633600, .ratio = 33, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 672000, .ratio = 35, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 691200, .ratio = 36, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 710400, .ratio = 37, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 729600, 
.ratio = 38, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 748800, .ratio = 39, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 768000, .ratio = 40, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 787200, .ratio = 41, .waveform = 0xffff }, + {} +}; + static const int cdclk_squash_len = 16; static int cdclk_squash_divider(u16 waveform) @@ -1567,7 +1595,7 @@ static int bxt_calc_cdclk(struct intel_display *display, int min_cdclk) drm_WARN(display->drm, 1, "Cannot satisfy minimum cdclk %d with refclk %u\n", min_cdclk, display->cdclk.hw.ref); - return 0; + return display->cdclk.max_cdclk_freq; } static int bxt_calc_cdclk_pll_vco(struct intel_display *display, int cdclk) @@ -1805,8 +1833,8 @@ static void bxt_de_pll_disable(struct intel_display *display) intel_de_write(display, BXT_DE_PLL_ENABLE, 0); /* Timeout 200us */ - if (intel_de_wait_for_clear(display, - BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) + if (intel_de_wait_for_clear_ms(display, + BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(display->drm, "timeout waiting for DE PLL unlock\n"); display->cdclk.hw.vco = 0; @@ -1822,8 +1850,8 @@ static void bxt_de_pll_enable(struct intel_display *display, int vco) intel_de_write(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); /* Timeout 200us */ - if (intel_de_wait_for_set(display, - BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) + if (intel_de_wait_for_set_ms(display, + BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(display->drm, "timeout waiting for DE PLL lock\n"); display->cdclk.hw.vco = vco; @@ -1835,7 +1863,7 @@ static void icl_cdclk_pll_disable(struct intel_display *display) BXT_DE_PLL_PLL_ENABLE, 0); /* Timeout 200us */ - if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) + if (intel_de_wait_for_clear_ms(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(display->drm, "timeout waiting for CDCLK PLL unlock\n"); display->cdclk.hw.vco = 0; @@ -1853,7 +1881,7 @@ static void icl_cdclk_pll_enable(struct intel_display *display, int vco) intel_de_write(display, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ - if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) + if (intel_de_wait_for_set_ms(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(display->drm, "timeout waiting for CDCLK PLL lock\n"); display->cdclk.hw.vco = vco; @@ -1873,8 +1901,8 @@ static void adlp_cdclk_pll_crawl(struct intel_display *display, int vco) intel_de_write(display, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ - if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, - BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1)) + if (intel_de_wait_for_set_ms(display, BXT_DE_PLL_ENABLE, + BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1)) drm_err(display->drm, "timeout waiting for FREQ change request ack\n"); val &= ~BXT_DE_PLL_FREQ_REQ; @@ -2600,6 +2628,12 @@ static void intel_set_cdclk(struct intel_display *display, } } +static int dg2_power_well_count(struct intel_display *display, + const struct intel_cdclk_state *cdclk_state) +{ + return display->platform.dg2 ? 
hweight8(cdclk_state->active_pipes) : 0; +} + static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); @@ -2612,16 +2646,16 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) if (!intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual) && - new_cdclk_state->active_pipes == - old_cdclk_state->active_pipes) + dg2_power_well_count(display, old_cdclk_state) == + dg2_power_well_count(display, new_cdclk_state)) return; /* According to "Sequence Before Frequency Change", voltage level set to 0x3 */ voltage_level = DISPLAY_TO_PCODE_VOLTAGE_MAX; change_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk; - update_pipe_count = hweight8(new_cdclk_state->active_pipes) > - hweight8(old_cdclk_state->active_pipes); + update_pipe_count = dg2_power_well_count(display, new_cdclk_state) > + dg2_power_well_count(display, old_cdclk_state); /* * According to "Sequence Before Frequency Change", @@ -2639,7 +2673,7 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) * no action if it is decreasing, before the change */ if (update_pipe_count) - num_active_pipes = hweight8(new_cdclk_state->active_pipes); + num_active_pipes = dg2_power_well_count(display, new_cdclk_state); intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk, change_cdclk, update_pipe_count); @@ -2659,8 +2693,8 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) voltage_level = new_cdclk_state->actual.voltage_level; update_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk; - update_pipe_count = hweight8(new_cdclk_state->active_pipes) < - hweight8(old_cdclk_state->active_pipes); + update_pipe_count = dg2_power_well_count(display, new_cdclk_state) < + dg2_power_well_count(display, old_cdclk_state); /* * According to "Sequence After Frequency Change", @@ -2676,7 +2710,7 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) * no action if it is increasing, after the change */ if (update_pipe_count) - num_active_pipes = hweight8(new_cdclk_state->active_pipes); + num_active_pipes = dg2_power_well_count(display, new_cdclk_state); intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk, update_cdclk, update_pipe_count); @@ -2711,6 +2745,9 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) struct intel_cdclk_config cdclk_config; enum pipe pipe; + if (!new_cdclk_state) + return; + if (!intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual)) return; @@ -2763,6 +2800,9 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) intel_atomic_get_new_cdclk_state(state); enum pipe pipe; + if (!new_cdclk_state) + return; + if (!intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual)) return; @@ -2800,16 +2840,20 @@ static int intel_cdclk_guardband(struct intel_display *display) return 90; } -static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) +static int _intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state, int pixel_rate) { struct intel_display *display = to_intel_display(crtc_state); int ppc = intel_cdclk_ppc(display, crtc_state->double_wide); int guardband = intel_cdclk_guardband(display); - int pixel_rate = crtc_state->pixel_rate; return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc); } +static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) +{ + return 
_intel_pixel_rate_to_cdclk(crtc_state, crtc_state->pixel_rate); +} + static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -2818,12 +2862,12 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) int min_cdclk = 0; for_each_intel_plane_on_crtc(display->drm, crtc, plane) - min_cdclk = max(min_cdclk, crtc_state->min_cdclk[plane->id]); + min_cdclk = max(min_cdclk, crtc_state->plane_min_cdclk[plane->id]); return min_cdclk; } -static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) +int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state) { int min_cdclk; @@ -2831,6 +2875,8 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat return 0; min_cdclk = intel_pixel_rate_to_cdclk(crtc_state); + min_cdclk = max(min_cdclk, intel_crtc_bw_min_cdclk(crtc_state)); + min_cdclk = max(min_cdclk, intel_fbc_min_cdclk(crtc_state)); min_cdclk = max(min_cdclk, hsw_ips_min_cdclk(crtc_state)); min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state)); min_cdclk = max(min_cdclk, vlv_dsi_min_cdclk(crtc_state)); @@ -2840,51 +2886,110 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat return min_cdclk; } -static int intel_compute_min_cdclk(struct intel_atomic_state *state) +static int intel_cdclk_update_crtc_min_cdclk(struct intel_atomic_state *state, + struct intel_crtc *crtc, + int old_min_cdclk, int new_min_cdclk, + bool *need_cdclk_calc) { struct intel_display *display = to_intel_display(state); - struct intel_cdclk_state *cdclk_state = - intel_atomic_get_new_cdclk_state(state); - const struct intel_bw_state *bw_state; - struct intel_crtc *crtc; - struct intel_crtc_state *crtc_state; - int min_cdclk, i; - enum pipe pipe; + struct intel_cdclk_state *cdclk_state; + bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state); + int ret; - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - int ret; + if (new_min_cdclk == old_min_cdclk) + return 0; - min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); - if (min_cdclk < 0) - return min_cdclk; + if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk) + return 0; - if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk) - continue; + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); - cdclk_state->min_cdclk[crtc->pipe] = min_cdclk; + old_min_cdclk = cdclk_state->min_cdclk[crtc->pipe]; - ret = intel_atomic_lock_global_state(&cdclk_state->base); - if (ret) - return ret; - } + if (new_min_cdclk == old_min_cdclk) + return 0; + + if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk) + return 0; + + cdclk_state->min_cdclk[crtc->pipe] = new_min_cdclk; - bw_state = intel_atomic_get_new_bw_state(state); - if (bw_state) { - min_cdclk = intel_bw_min_cdclk(display, bw_state); + ret = intel_atomic_lock_global_state(&cdclk_state->base); + if (ret) + return ret; - if (cdclk_state->bw_min_cdclk != min_cdclk) { - int ret; + *need_cdclk_calc = true; - cdclk_state->bw_min_cdclk = min_cdclk; + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] min cdclk: %d kHz -> %d kHz\n", + crtc->base.base.id, crtc->base.name, + old_min_cdclk, new_min_cdclk); - ret = intel_atomic_lock_global_state(&cdclk_state->base); - if (ret) - return ret; - } - } + return 0; +} + +int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state, + int old_min_cdclk, int new_min_cdclk, + bool *need_cdclk_calc) +{ + 
struct intel_display *display = to_intel_display(state); + struct intel_cdclk_state *cdclk_state; + bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state); + int ret; + + if (new_min_cdclk == old_min_cdclk) + return 0; + + if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk) + return 0; + + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); + + old_min_cdclk = cdclk_state->dbuf_bw_min_cdclk; - min_cdclk = max(cdclk_state->force_min_cdclk, - cdclk_state->bw_min_cdclk); + if (new_min_cdclk == old_min_cdclk) + return 0; + + if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk) + return 0; + + cdclk_state->dbuf_bw_min_cdclk = new_min_cdclk; + + ret = intel_atomic_lock_global_state(&cdclk_state->base); + if (ret) + return ret; + + *need_cdclk_calc = true; + + drm_dbg_kms(display->drm, + "dbuf bandwidth min cdclk: %d kHz -> %d kHz\n", + old_min_cdclk, new_min_cdclk); + + return 0; +} + +static bool glk_cdclk_audio_wa_needed(struct intel_display *display, + const struct intel_cdclk_state *cdclk_state) +{ + return display->platform.geminilake && + cdclk_state->enabled_pipes && + !is_power_of_2(cdclk_state->enabled_pipes); +} + +static int intel_compute_min_cdclk(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + struct intel_cdclk_state *cdclk_state = + intel_atomic_get_new_cdclk_state(state); + enum pipe pipe; + int min_cdclk; + + min_cdclk = cdclk_state->force_min_cdclk; + min_cdclk = max(min_cdclk, cdclk_state->dbuf_bw_min_cdclk); for_each_pipe(display, pipe) min_cdclk = max(min_cdclk, cdclk_state->min_cdclk[pipe]); @@ -2896,8 +3001,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) * by changing the cd2x divider (see glk_cdclk_table[]) and * thus a full modeset won't be needed then. */ - if (display->platform.geminilake && cdclk_state->active_pipes && - !is_power_of_2(cdclk_state->active_pipes)) + if (glk_cdclk_audio_wa_needed(display, cdclk_state)) min_cdclk = max(min_cdclk, 2 * 96000); if (min_cdclk > display->cdclk.max_cdclk_freq) { @@ -3183,38 +3287,66 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state) return to_intel_cdclk_state(cdclk_state); } -int intel_cdclk_atomic_check(struct intel_atomic_state *state, - bool *need_cdclk_calc) +static int intel_cdclk_modeset_checks(struct intel_atomic_state *state, + bool *need_cdclk_calc) { + struct intel_display *display = to_intel_display(state); const struct intel_cdclk_state *old_cdclk_state; - const struct intel_cdclk_state *new_cdclk_state; - struct intel_plane_state __maybe_unused *plane_state; - struct intel_plane *plane; + struct intel_cdclk_state *new_cdclk_state; int ret; - int i; - /* - * active_planes bitmask has been updated, and potentially affected - * planes are part of the state. We can now compute the minimum cdclk - * for each plane. 
- */ - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); - if (ret) - return ret; - } + if (!intel_any_crtc_enable_changed(state) && + !intel_any_crtc_active_changed(state)) + return 0; + + new_cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(new_cdclk_state)) + return PTR_ERR(new_cdclk_state); + + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); + + new_cdclk_state->enabled_pipes = + intel_calc_enabled_pipes(state, old_cdclk_state->enabled_pipes); - ret = intel_bw_calc_min_cdclk(state, need_cdclk_calc); + new_cdclk_state->active_pipes = + intel_calc_active_pipes(state, old_cdclk_state->active_pipes); + + ret = intel_atomic_lock_global_state(&new_cdclk_state->base); if (ret) return ret; - old_cdclk_state = intel_atomic_get_old_cdclk_state(state); - new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + if (!old_cdclk_state->active_pipes != !new_cdclk_state->active_pipes) + *need_cdclk_calc = true; - if (new_cdclk_state && - old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) + if (glk_cdclk_audio_wa_needed(display, old_cdclk_state) != + glk_cdclk_audio_wa_needed(display, new_cdclk_state)) *need_cdclk_calc = true; + if (dg2_power_well_count(display, old_cdclk_state) != + dg2_power_well_count(display, new_cdclk_state)) + *need_cdclk_calc = true; + + return 0; +} + +static int intel_crtcs_calc_min_cdclk(struct intel_atomic_state *state, + bool *need_cdclk_calc) +{ + const struct intel_crtc_state *old_crtc_state; + const struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int i, ret; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + ret = intel_cdclk_update_crtc_min_cdclk(state, crtc, + old_crtc_state->min_cdclk, + new_crtc_state->min_cdclk, + need_cdclk_calc); + if (ret) + return ret; + } + return 0; } @@ -3250,18 +3382,17 @@ static bool intel_cdclk_need_serialize(struct intel_display *display, const struct intel_cdclk_state *old_cdclk_state, const struct intel_cdclk_state *new_cdclk_state) { - bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) != - hweight8(new_cdclk_state->active_pipes); - bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual, - &new_cdclk_state->actual); /* - * We need to poke hw for gen >= 12, because we notify PCode if + * We need to poke hw for DG2, because we notify PCode if * pipe power well count changes. 
*/ - return cdclk_changed || (display->platform.dg2 && power_well_cnt_changed); + return intel_cdclk_changed(&old_cdclk_state->actual, + &new_cdclk_state->actual) || + dg2_power_well_count(display, old_cdclk_state) != + dg2_power_well_count(display, new_cdclk_state); } -int intel_modeset_calc_cdclk(struct intel_atomic_state *state) +static int intel_modeset_calc_cdclk(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); const struct intel_cdclk_state *old_cdclk_state; @@ -3275,9 +3406,6 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) old_cdclk_state = intel_atomic_get_old_cdclk_state(state); - new_cdclk_state->active_pipes = - intel_calc_active_pipes(state, old_cdclk_state->active_pipes); - ret = intel_cdclk_modeset_calc_cdclk(state); if (ret) return ret; @@ -3290,9 +3418,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) ret = intel_atomic_serialize_global_state(&new_cdclk_state->base); if (ret) return ret; - } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes || - old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk || - intel_cdclk_changed(&old_cdclk_state->logical, + } else if (intel_cdclk_changed(&old_cdclk_state->logical, &new_cdclk_state->logical)) { ret = intel_atomic_lock_global_state(&new_cdclk_state->base); if (ret) @@ -3374,14 +3500,55 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) return 0; } +int intel_cdclk_atomic_check(struct intel_atomic_state *state) +{ + const struct intel_cdclk_state *old_cdclk_state; + struct intel_cdclk_state *new_cdclk_state; + bool need_cdclk_calc = false; + int ret; + + ret = intel_cdclk_modeset_checks(state, &need_cdclk_calc); + if (ret) + return ret; + + ret = intel_crtcs_calc_min_cdclk(state, &need_cdclk_calc); + if (ret) + return ret; + + ret = intel_dbuf_bw_calc_min_cdclk(state, &need_cdclk_calc); + if (ret) + return ret; + + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + + if (new_cdclk_state && + old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) { + ret = intel_atomic_lock_global_state(&new_cdclk_state->base); + if (ret) + return ret; + + need_cdclk_calc = true; + } + + if (need_cdclk_calc) { + ret = intel_modeset_calc_cdclk(state); + if (ret) + return ret; + } + + return 0; +} + void intel_cdclk_update_hw_state(struct intel_display *display) { - const struct intel_bw_state *bw_state = - to_intel_bw_state(display->bw.obj.state); + const struct intel_dbuf_bw_state *dbuf_bw_state = + to_intel_dbuf_bw_state(display->dbuf_bw.obj.state); struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state); struct intel_crtc *crtc; + cdclk_state->enabled_pipes = 0; cdclk_state->active_pipes = 0; for_each_intel_crtc(display->drm, crtc) { @@ -3389,14 +3556,16 @@ void intel_cdclk_update_hw_state(struct intel_display *display) to_intel_crtc_state(crtc->base.state); enum pipe pipe = crtc->pipe; + if (crtc_state->hw.enable) + cdclk_state->enabled_pipes |= BIT(pipe); if (crtc_state->hw.active) cdclk_state->active_pipes |= BIT(pipe); - cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state); + cdclk_state->min_cdclk[pipe] = crtc_state->min_cdclk; cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level; } - cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state); + cdclk_state->dbuf_bw_min_cdclk = intel_dbuf_bw_min_cdclk(display, dbuf_bw_state); } void 
intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc) @@ -3425,7 +3594,9 @@ static int intel_compute_max_dotclk(struct intel_display *display) */ void intel_update_max_cdclk(struct intel_display *display) { - if (DISPLAY_VERx100(display) >= 3002) { + if (DISPLAY_VER(display) >= 35) { + display->cdclk.max_cdclk_freq = 787200; + } else if (DISPLAY_VERx100(display) >= 3002) { display->cdclk.max_cdclk_freq = 480000; } else if (DISPLAY_VER(display) >= 30) { display->cdclk.max_cdclk_freq = 691200; @@ -3565,13 +3736,6 @@ static int pch_rawclk(struct intel_display *display) return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; } -static int vlv_hrawclk(struct intel_display *display) -{ - /* RAWCLK_FREQ_VLV register updated from power well code */ - return vlv_get_cck_clock_hpll(display->drm, "hrawclk", - CCK_DISPLAY_REF_CLOCK_CONTROL); -} - static int i9xx_hrawclk(struct intel_display *display) { struct drm_i915_private *i915 = to_i915(display->drm); @@ -3605,7 +3769,7 @@ u32 intel_read_rawclk(struct intel_display *display) else if (HAS_PCH_SPLIT(display)) freq = pch_rawclk(display); else if (display->platform.valleyview || display->platform.cherryview) - freq = vlv_hrawclk(display); + freq = vlv_clock_get_hrawclk(display->drm); else if (DISPLAY_VER(display) >= 3) freq = i9xx_hrawclk(display); else @@ -3783,7 +3947,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = { */ void intel_init_cdclk_hooks(struct intel_display *display) { - if (DISPLAY_VER(display) >= 30) { + if (DISPLAY_VER(display) >= 35) { + display->funcs.cdclk = &xe3lpd_cdclk_funcs; + display->cdclk.table = xe3p_lpd_cdclk_table; + } else if (DISPLAY_VER(display) >= 30) { display->funcs.cdclk = &xe3lpd_cdclk_funcs; display->cdclk.table = xe3lpd_cdclk_table; } else if (DISPLAY_VER(display) >= 20) { @@ -3897,11 +4064,6 @@ int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe return cdclk_state->min_cdclk[pipe]; } -int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state) -{ - return cdclk_state->bw_min_cdclk; -} - bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state) { const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state; @@ -3933,3 +4095,75 @@ void intel_cdclk_read_hw(struct intel_display *display) cdclk_state->actual = display->cdclk.hw; cdclk_state->logical = display->cdclk.hw; } + +static int calc_cdclk(const struct intel_crtc_state *crtc_state, int min_cdclk) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (DISPLAY_VER(display) >= 10 || display->platform.broxton) { + return bxt_calc_cdclk(display, min_cdclk); + } else if (DISPLAY_VER(display) == 9) { + int vco; + + vco = display->cdclk.skl_preferred_vco_freq; + if (vco == 0) + vco = 8100000; + + return skl_calc_cdclk(min_cdclk, vco); + } else if (display->platform.broadwell) { + return bdw_calc_cdclk(min_cdclk); + } else if (display->platform.cherryview || display->platform.valleyview) { + return vlv_calc_cdclk(display, min_cdclk); + } else { + return display->cdclk.max_cdclk_freq; + } +} + +static unsigned int _intel_cdclk_prefill_adj(const struct intel_crtc_state *crtc_state, + int clock, int min_cdclk) +{ + struct intel_display *display = to_intel_display(crtc_state); + int ppc = intel_cdclk_ppc(display, crtc_state->double_wide); + int cdclk = calc_cdclk(crtc_state, min_cdclk); + + return min(0x10000, DIV_ROUND_UP_ULL((u64)clock << 16, ppc * cdclk)); +} + +unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state) 
+{ + /* FIXME use the actual min_cdclk for the pipe here */ + return intel_cdclk_prefill_adjustment_worst(crtc_state); +} + +unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state) +{ + int clock = crtc_state->hw.pipe_mode.crtc_clock; + int min_cdclk; + + /* + * FIXME could perhaps consider a few more of the factors + * that go the per-crtc min_cdclk. Namely anything that + * only changes during full modesets. + * + * FIXME this assumes 1:1 scaling, but the other _worst() stuff + * assumes max downscaling, so the final result will be + * unrealistically bad. Figure out where the actual maximum value + * lies and use that to compute a more realistic worst case + * estimate... + */ + min_cdclk = _intel_pixel_rate_to_cdclk(crtc_state, clock); + + return _intel_cdclk_prefill_adj(crtc_state, clock, min_cdclk); +} + +int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state, + unsigned int prefill_lines_unadjusted, + unsigned int prefill_lines_available) +{ + struct intel_display *display = to_intel_display(crtc_state); + const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; + int ppc = intel_cdclk_ppc(display, crtc_state->double_wide); + + return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, prefill_lines_unadjusted), + ppc * prefill_lines_available); +} diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index cacee598af0e..1ff7d078b42c 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -38,16 +38,17 @@ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state); void intel_cdclk_dump_config(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, const char *context); -int intel_modeset_calc_cdclk(struct intel_atomic_state *state); void intel_cdclk_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config); -int intel_cdclk_atomic_check(struct intel_atomic_state *state, - bool *need_cdclk_calc); +int intel_cdclk_atomic_check(struct intel_atomic_state *state); int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus); struct intel_cdclk_state * intel_atomic_get_cdclk_state(struct intel_atomic_state *state); void intel_cdclk_update_hw_state(struct intel_display *display); void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc); +int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state, + int old_min_cdclk, int new_min_cdclk, + bool *need_cdclk_calc); #define to_intel_cdclk_state(global_state) \ container_of_const((global_state), struct intel_cdclk_state, base) @@ -64,9 +65,16 @@ int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state); int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state); int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state); int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe); -int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state); bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state); void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk); void intel_cdclk_read_hw(struct intel_display *display); +unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state); +unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state); +int 
intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state, + unsigned int prefill_lines_unadjusted, + unsigned int prefill_lines_available); + +int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state); + #endif /* __INTEL_CDCLK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 671db6926e4c..a217a67ceb43 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -24,12 +24,12 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "i9xx_plane_regs.h" #include "intel_color.h" #include "intel_color_regs.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dsb.h" #include "intel_vrr.h" @@ -1090,18 +1090,19 @@ static void skl_get_config(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - u32 tmp; crtc_state->gamma_mode = hsw_read_gamma_mode(crtc); crtc_state->csc_mode = ilk_read_csc_mode(crtc); - tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe)); + if (DISPLAY_VER(display) < 35) { + u32 tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe)); - if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) - crtc_state->gamma_enable = true; + if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) + crtc_state->gamma_enable = true; - if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) - crtc_state->csc_enable = true; + if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) + crtc_state->csc_enable = true; + } } static void skl_color_commit_arm(struct intel_dsb *dsb, @@ -2013,7 +2014,7 @@ void intel_color_prepare_commit(struct intel_atomic_state *state, if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) { intel_vrr_send_push(crtc_state->dsb_color, crtc_state); - intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color); + intel_dsb_wait_for_delayed_vblank(state, crtc_state->dsb_color); intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state); intel_dsb_interrupt(crtc_state->dsb_color); } diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 112749f97c26..f401558ac14e 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -5,12 +5,12 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #define for_each_combo_phy(__display, __phy) \ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \ diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 6a55854db5b6..913d90a7a508 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -28,10 +28,11 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "i915_drv.h" -#include "i915_utils.h" +#include "i915_utils.h" /* for i915_inject_probe_failure() */ #include "intel_connector.h" #include "intel_display_core.h" #include "intel_display_debugfs.h" diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 31e68047f217..82e89cdbe5a5 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ 
-498,10 +498,10 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector) intel_de_write(display, crt->adpa_reg, adpa); - if (intel_de_wait_for_clear(display, - crt->adpa_reg, - ADPA_CRT_HOTPLUG_FORCE_TRIGGER, - 1000)) + if (intel_de_wait_for_clear_ms(display, + crt->adpa_reg, + ADPA_CRT_HOTPLUG_FORCE_TRIGGER, + 1000)) drm_dbg_kms(display->drm, "timed out waiting for FORCE_TRIGGER"); @@ -553,8 +553,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) intel_de_write(display, crt->adpa_reg, adpa); - if (intel_de_wait_for_clear(display, crt->adpa_reg, - ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) { + if (intel_de_wait_for_clear_ms(display, crt->adpa_reg, + ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) { drm_dbg_kms(display->drm, "timed out waiting for FORCE_TRIGGER"); intel_de_write(display, crt->adpa_reg, save_adpa); @@ -604,8 +604,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) CRT_HOTPLUG_FORCE_DETECT, CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ - if (intel_de_wait_for_clear(display, PORT_HOTPLUG_EN(display), - CRT_HOTPLUG_FORCE_DETECT, 1000)) + if (intel_de_wait_for_clear_ms(display, PORT_HOTPLUG_EN(display), + CRT_HOTPLUG_FORCE_DETECT, 1000)) drm_dbg_kms(display->drm, "timed out waiting for FORCE_DETECT to go off"); } diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index a187db6df2d3..9d2a23c96c61 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <drm/drm_vblank_work.h> @@ -84,8 +85,13 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) if (!crtc->active) return 0; - if (!vblank->max_vblank_count) - return (u32)drm_crtc_accurate_vblank_count(&crtc->base); + if (!vblank->max_vblank_count) { + /* On preempt-rt we cannot take the vblank spinlock since this function is called from tracepoints */ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + return (u32)drm_crtc_vblank_count(&crtc->base); + else + return (u32)drm_crtc_accurate_vblank_count(&crtc->base); + } return crtc->base.funcs->get_vblank_counter(&crtc->base); } @@ -390,6 +396,9 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe) drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe); + if (HAS_CASF(display)) + drm_crtc_create_sharpness_strength_property(&crtc->base); + return 0; fail: @@ -748,3 +757,89 @@ void intel_pipe_update_end(struct intel_atomic_state *state, out: intel_psr_unlock(new_crtc_state); } + +bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + return old_crtc_state->hw.enable != new_crtc_state->hw.enable; +} + +bool intel_any_crtc_enable_changed(struct intel_atomic_state *state) +{ + const struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (intel_crtc_enable_changed(old_crtc_state, new_crtc_state)) + return true; + } + + return false; +} + +bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + return old_crtc_state->hw.active != new_crtc_state->hw.active; +} + +bool intel_any_crtc_active_changed(struct intel_atomic_state *state) +{ + const struct 
intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (intel_crtc_active_changed(old_crtc_state, new_crtc_state)) + return true; + } + + return false; +} + +unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state) +{ + /* + * We assume cursors are small enough + * to not cause bandwidth problems. + */ + return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); +} + +unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + unsigned int data_rate = 0; + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) { + /* + * We assume cursors are small enough + * to not cause bandwidth problems. + */ + if (plane_id == PLANE_CURSOR) + continue; + + data_rate += crtc_state->data_rate[plane_id]; + + if (DISPLAY_VER(display) < 11) + data_rate += crtc_state->data_rate_y[plane_id]; + } + + return data_rate; +} + +/* "Maximum Pipe Read Bandwidth" */ +int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (DISPLAY_VER(display) < 12) + return 0; + + return DIV_ROUND_UP_ULL(mul_u32_u32(intel_crtc_bw_data_rate(crtc_state), 10), 512); +} diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h index 8c14ff8b391e..07917e8a9ae3 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.h +++ b/drivers/gpu/drm/i915/display/intel_crtc.h @@ -58,4 +58,15 @@ void intel_wait_for_vblank_if_active(struct intel_display *display, enum pipe pipe); void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc); +bool intel_any_crtc_enable_changed(struct intel_atomic_state *state); +bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state); +bool intel_any_crtc_active_changed(struct intel_atomic_state *state); +bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state); + +unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state); +unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state); +int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state); + #endif diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 0c7f91046996..c2a6217c2262 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -289,10 +289,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, drm_printf(&p, "scanline offset: %d\n", intel_crtc_scanline_offset(pipe_config)); - drm_printf(&p, "vblank delay: %d, framestart delay: %d, MSA timing delay: %d\n", - pipe_config->hw.adjusted_mode.crtc_vblank_start - - pipe_config->hw.adjusted_mode.crtc_vdisplay, - pipe_config->framestart_delay, pipe_config->msa_timing_delay); + drm_printf(&p, "framestart delay: %d, MSA timing delay: %d, set context latency: %d\n", + pipe_config->framestart_delay, pipe_config->msa_timing_delay, + pipe_config->set_context_latency); drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n", 
str_yes_no(pipe_config->vrr.enable), @@ -313,9 +312,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&pipe_config->hw.pipe_mode)); intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode); - drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n", + drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d, min cdclk %d\n", pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src), - pipe_config->pixel_rate); + pipe_config->pixel_rate, pipe_config->min_cdclk); drm_printf(&p, "linetime: %d, ips linetime: %d\n", pipe_config->linetime, pipe_config->ips_linetime); @@ -373,6 +372,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, intel_vdsc_state_dump(&p, 0, pipe_config); + drm_printf(&p, "sharpness strength: %d, sharpness tap size: %d, sharpness enable: %d\n", + pipe_config->hw.casf_params.strength, + pipe_config->hw.casf_params.win_size, + pipe_config->hw.casf_params.casf_enable); + dump_planes: if (!state) return; diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index d4d181f9dca5..a10b2425b94d 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -12,13 +12,13 @@ #include <drm/drm_print.h> #include <drm/drm_vblank.h> -#include "i915_utils.h" #include "intel_atomic.h" #include "intel_cursor.h" #include "intel_cursor_regs.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_frontbuffer.h" @@ -182,8 +182,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, static unsigned int i845_cursor_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { return 2048; } @@ -343,8 +343,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane, static unsigned int i9xx_cursor_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { return plane->base.dev->mode_config.cursor_width * 4; } @@ -662,7 +662,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb, cntl = plane_state->ctl | i9xx_cursor_ctl_crtc(crtc_state); - if (width != height) + if (DISPLAY_VER(display) < 14 && width != height) fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1); base = plane_state->surf; @@ -1092,3 +1092,23 @@ fail: return ERR_PTR(ret); } + +void intel_cursor_mode_config_init(struct intel_display *display) +{ + struct drm_mode_config *mode_config = &display->drm->mode_config; + + if (display->platform.i845g) { + mode_config->cursor_width = 64; + mode_config->cursor_height = 1023; + } else if (display->platform.i865g) { + mode_config->cursor_width = 512; + mode_config->cursor_height = 1023; + } else if (display->platform.i830 || display->platform.i85x || + display->platform.i915g || display->platform.i915gm) { + mode_config->cursor_width = 64; + mode_config->cursor_height = 64; + } else { + mode_config->cursor_width = 256; + mode_config->cursor_height = 256; + } +} diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h index 65a9e7eb88c2..7c269d7381ad 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.h +++ 
b/drivers/gpu/drm/i915/display/intel_cursor.h @@ -17,4 +17,6 @@ intel_cursor_plane_create(struct intel_display *display, void intel_cursor_unpin_work(struct kthread_work *base); +void intel_cursor_mode_config_init(struct intel_display *display); + #endif diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 801235a5bc0a..d98b4cf6b60e 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -8,7 +8,6 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_alpm.h" #include "intel_cx0_phy.h" #include "intel_cx0_phy_regs.h" @@ -16,16 +15,15 @@ #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_hdmi.h" +#include "intel_lt_phy.h" #include "intel_panel.h" #include "intel_psr.h" #include "intel_snps_hdmi_pll.h" #include "intel_tc.h" -#define MB_WRITE_COMMITTED true -#define MB_WRITE_UNCOMMITTED false - #define for_each_cx0_lane_in_mask(__lane_mask, __lane) \ for ((__lane) = 0; (__lane) < 2; (__lane)++) \ for_each_if((__lane_mask) & BIT(__lane)) @@ -39,14 +37,12 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder) struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - /* PTL doesn't have a PHY connected to PORT B; as such, - * there will never be a case where PTL uses PHY B. - * WCL uses PORT A and B with the C10 PHY. - * Reusing the condition for WCL and extending it for PORT B - * should not cause any issues for PTL. - */ - if (display->platform.pantherlake && phy < PHY_C) - return true; + if (display->platform.pantherlake) { + if (display->platform.pantherlake_wildcatlake) + return phy <= PHY_B; + else + return phy == PHY_A; + } if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C) return true; @@ -130,8 +126,8 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref); } -static void intel_clear_response_ready_flag(struct intel_encoder *encoder, - int lane) +void intel_clear_response_ready_flag(struct intel_encoder *encoder, + int lane) { struct intel_display *display = to_intel_display(encoder); @@ -140,7 +136,7 @@ static void intel_clear_response_ready_flag(struct intel_encoder *encoder, 0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET); } -static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) +void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) { struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; @@ -149,9 +145,9 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET); - if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), - XELPDP_PORT_M2P_TRANSACTION_RESET, - XELPDP_MSGBUS_TIMEOUT_SLOW)) { + if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), + XELPDP_PORT_M2P_TRANSACTION_RESET, + XELPDP_MSGBUS_TIMEOUT_MS)) { drm_err_once(display->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy)); @@ -161,19 +157,17 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) intel_clear_response_ready_flag(encoder, lane); } -static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, - int 
command, int lane, u32 *val) +int intel_cx0_wait_for_ack(struct intel_encoder *encoder, + int command, int lane, u32 *val) { struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - if (intel_de_wait_custom(display, - XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), - XELPDP_PORT_P2M_RESPONSE_READY, - XELPDP_PORT_P2M_RESPONSE_READY, - XELPDP_MSGBUS_TIMEOUT_FAST_US, - XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { + if (intel_de_wait_ms(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), + XELPDP_PORT_P2M_RESPONSE_READY, + XELPDP_PORT_P2M_RESPONSE_READY, + XELPDP_MSGBUS_TIMEOUT_MS, val)) { drm_dbg_kms(display->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", phy_name(phy), *val); @@ -218,9 +212,9 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder, int ack; u32 val; - if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), - XELPDP_PORT_M2P_TRANSACTION_PENDING, - XELPDP_MSGBUS_TIMEOUT_SLOW)) { + if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING, + XELPDP_MSGBUS_TIMEOUT_MS)) { drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); @@ -273,8 +267,7 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder, return 0; } -static u8 intel_cx0_read(struct intel_encoder *encoder, - u8 lane_mask, u16 addr) +u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr) { int lane = lane_mask_to_lane(lane_mask); @@ -290,9 +283,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, int ack; u32 val; - if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), - XELPDP_PORT_M2P_TRANSACTION_PENDING, - XELPDP_MSGBUS_TIMEOUT_SLOW)) { + if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING, + XELPDP_MSGBUS_TIMEOUT_MS)) { drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); @@ -306,9 +299,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, XELPDP_PORT_M2P_DATA(data) | XELPDP_PORT_M2P_ADDRESS(addr)); - if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), - XELPDP_PORT_M2P_TRANSACTION_PENDING, - XELPDP_MSGBUS_TIMEOUT_SLOW)) { + if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING, + XELPDP_MSGBUS_TIMEOUT_MS)) { drm_dbg_kms(display->drm, "PHY %c Timeout waiting for write to complete. 
Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); @@ -361,8 +354,8 @@ static void __intel_cx0_write(struct intel_encoder *encoder, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i); } -static void intel_cx0_write(struct intel_encoder *encoder, - u8 lane_mask, u16 addr, u8 data, bool committed) +void intel_cx0_write(struct intel_encoder *encoder, + u8 lane_mask, u16 addr, u8 data, bool committed) { int lane; @@ -414,8 +407,8 @@ static void __intel_cx0_rmw(struct intel_encoder *encoder, __intel_cx0_write(encoder, lane, addr, val, committed); } -static void intel_cx0_rmw(struct intel_encoder *encoder, - u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed) +void intel_cx0_rmw(struct intel_encoder *encoder, + u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed) { u8 lane; @@ -2105,6 +2098,9 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state, return 0; } +static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_c10pll_state *pll_state); + static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state) { @@ -2129,6 +2125,8 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0)); intel_cx0_phy_transaction_end(encoder, wakeref); + + pll_state->clock = intel_c10pll_calc_port_clock(encoder, pll_state); } static void intel_c10_pll_program(struct intel_display *display, @@ -2587,20 +2585,6 @@ static bool is_dp2(u32 clock) return false; } -static bool is_hdmi_frl(u32 clock) -{ - switch (clock) { - case 300000: /* 3 Gbps */ - case 600000: /* 6 Gbps */ - case 800000: /* 8 Gbps */ - case 1000000: /* 10 Gbps */ - case 1200000: /* 12 Gbps */ - return true; - default: - return false; - } -} - static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); @@ -2614,7 +2598,7 @@ static int intel_get_c20_custom_width(u32 clock, bool dp) { if (dp && is_dp2(clock)) return 2; - else if (is_hdmi_frl(clock)) + else if (intel_hdmi_is_frl(clock)) return 1; else return 0; @@ -2626,11 +2610,13 @@ static void intel_c20_pll_program(struct intel_display *display, bool is_dp, int port_clock) { u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); + u8 serdes; bool cntx; int i; /* 1. Read current context selection */ - cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0); + cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & + PHY_C20_CONTEXT_TOGGLE; /* * 2. If there is a protocol switch from HDMI to DP or vice versa, clear @@ -2700,28 +2686,31 @@ static void intel_c20_pll_program(struct intel_display *display, MB_WRITE_COMMITTED); /* 5. For DP or 6. For HDMI */ - if (is_dp) { - intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE, - BIT(6) | PHY_C20_CUSTOM_SERDES_MASK, - BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(port_clock)), - MB_WRITE_COMMITTED); - } else { - intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE, - BIT(7) | PHY_C20_CUSTOM_SERDES_MASK, - is_hdmi_frl(port_clock) ? 
BIT(7) : 0, - MB_WRITE_COMMITTED); + serdes = 0; + if (is_dp) + serdes = PHY_C20_IS_DP | + PHY_C20_DP_RATE(intel_c20_get_dp_rate(port_clock)); + else if (intel_hdmi_is_frl(port_clock)) + serdes = PHY_C20_IS_HDMI_FRL; - intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, - intel_c20_get_hdmi_rate(port_clock), - MB_WRITE_COMMITTED); - } + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE, + PHY_C20_IS_DP | PHY_C20_DP_RATE_MASK | PHY_C20_IS_HDMI_FRL, + serdes, + MB_WRITE_COMMITTED); + + if (!is_dp) + intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, + PHY_C20_HDMI_RATE_MASK, + intel_c20_get_hdmi_rate(port_clock), + MB_WRITE_COMMITTED); /* * 7. Write Vendor specific registers to toggle context setting to load * the updated programming toggle context bit */ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE, - BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED); + PHY_C20_CONTEXT_TOGGLE, cntx ? 0 : PHY_C20_CONTEXT_TOGGLE, + MB_WRITE_COMMITTED); } static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, @@ -2768,7 +2757,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder, val |= XELPDP_FORWARD_CLOCK_UNGATE; - if (!is_dp && is_hdmi_frl(port_clock)) + if (!is_dp && intel_hdmi_is_frl(port_clock)) val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK); else val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK); @@ -2808,8 +2797,8 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state) return val; } -static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, - u8 lane_mask, u8 state) +void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, + u8 lane_mask, u8 state) { struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; @@ -2823,9 +2812,9 @@ static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, /* Wait for pending transactions.*/ for_each_cx0_lane_in_mask(lane_mask, lane) - if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), - XELPDP_PORT_M2P_TRANSACTION_PENDING, - XELPDP_MSGBUS_TIMEOUT_SLOW)) { + if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING, + XELPDP_MSGBUS_TIMEOUT_MS)) { drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. 
Reset the bus.\n", phy_name(phy)); @@ -2837,26 +2826,26 @@ static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, intel_cx0_get_powerdown_update(lane_mask)); /* Update Timeout Value */ - if (intel_de_wait_custom(display, buf_ctl2_reg, - intel_cx0_get_powerdown_update(lane_mask), 0, - XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_for_clear_ms(display, buf_ctl2_reg, + intel_cx0_get_powerdown_update(lane_mask), + XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS)) drm_warn(display->drm, - "PHY %c failed to bring out of Lane reset after %dus.\n", - phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); + "PHY %c failed to bring out of lane reset\n", + phy_name(phy)); } -static void intel_cx0_setup_powerdown(struct intel_encoder *encoder) +void intel_cx0_setup_powerdown(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), XELPDP_POWER_STATE_READY_MASK, - XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY)); + XELPDP_POWER_STATE_READY(XELPDP_P2_STATE_READY)); intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port), XELPDP_POWER_STATE_ACTIVE_MASK | XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, - XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) | + XELPDP_POWER_STATE_ACTIVE(XELPDP_P0_STATE_ACTIVE) | XELPDP_PLL_LANE_STAGGERING_DELAY(0)); } @@ -2898,48 +2887,47 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder, XELPDP_LANE_PHY_CURRENT_STATUS(1)) : XELPDP_LANE_PHY_CURRENT_STATUS(0); - if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL1(display, port), - XELPDP_PORT_BUF_SOC_PHY_READY, - XELPDP_PORT_BUF_SOC_PHY_READY, - XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL1(display, port), + XELPDP_PORT_BUF_SOC_PHY_READY, + XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US)) drm_warn(display->drm, - "PHY %c failed to bring out of SOC reset after %dus.\n", - phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US); + "PHY %c failed to bring out of SOC reset\n", + phy_name(phy)); intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, lane_pipe_reset); - if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port), - lane_phy_current_status, lane_phy_current_status, - XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_current_status, + XELPDP_PORT_RESET_START_TIMEOUT_US)) drm_warn(display->drm, - "PHY %c failed to bring out of Lane reset after %dus.\n", - phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); + "PHY %c failed to bring out of lane reset\n", + phy_name(phy)); intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), intel_cx0_get_pclk_refclk_request(owned_lane_mask), intel_cx0_get_pclk_refclk_request(lane_mask)); - if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port), - intel_cx0_get_pclk_refclk_ack(owned_lane_mask), - intel_cx0_get_pclk_refclk_ack(lane_mask), - XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, port), + intel_cx0_get_pclk_refclk_ack(owned_lane_mask), + intel_cx0_get_pclk_refclk_ack(lane_mask), + XELPDP_REFCLK_ENABLE_TIMEOUT_US, NULL)) drm_warn(display->drm, - "PHY %c failed to request refclk after %dus.\n", - phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US); + "PHY %c failed to request refclk\n", + phy_name(phy)); intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, - 
CX0_P2_STATE_RESET); + XELPDP_P2_STATE_RESET); intel_cx0_setup_powerdown(encoder); intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0); - if (intel_de_wait_for_clear(display, XELPDP_PORT_BUF_CTL2(display, port), - lane_phy_current_status, - XELPDP_PORT_RESET_END_TIMEOUT)) + if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_current_status, + XELPDP_PORT_RESET_END_TIMEOUT_MS)) drm_warn(display->drm, - "PHY %c failed to bring out of Lane reset after %dms.\n", - phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT); + "PHY %c failed to bring out of lane reset\n", + phy_name(phy)); } static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count, @@ -3034,7 +3022,7 @@ static void __intel_cx0pll_enable(struct intel_encoder *encoder, * TODO: For DP alt mode use only one lane. */ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, - CX0_P2_STATE_READY); + XELPDP_P2_STATE_READY); /* * 4. Program PORT_MSGBUS_TIMER register's Message Bus Timer field to 0xA000. @@ -3074,12 +3062,12 @@ static void __intel_cx0pll_enable(struct intel_encoder *encoder, intel_cx0_get_pclk_pll_request(maxpclk_lane)); /* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */ - if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), - intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), - intel_cx0_get_pclk_pll_ack(maxpclk_lane), - XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL)) - drm_warn(display->drm, "Port %c PLL not locked after %dus.\n", - phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US); + if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), + intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), + intel_cx0_get_pclk_pll_ack(maxpclk_lane), + XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, NULL)) + drm_warn(display->drm, "Port %c PLL not locked\n", + phy_name(phy)); /* * 11. Follow the Display Voltage Frequency Switching Sequence After @@ -3160,8 +3148,8 @@ static int intel_mtl_tbt_clock_select(struct intel_display *display, } } -static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); @@ -3198,12 +3186,9 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val); /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". 
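 *
 * For reference, a sketch of the reworked intel_de wait helpers used
 * for this poll and throughout this diff; the timeout unit is now part
 * of the helper name, with signatures as added to intel_de.h:
 *
 *   intel_de_wait_for_set_us(display, reg, mask, timeout_us)
 *   intel_de_wait_for_clear_us(display, reg, mask, timeout_us)
 *   intel_de_wait_for_set_ms(display, reg, mask, timeout_ms)
 *   intel_de_wait_for_clear_ms(display, reg, mask, timeout_ms)
 *   intel_de_wait_us(display, reg, mask, value, timeout_us, &out_value)
 *   intel_de_wait_ms(display, reg, mask, value, timeout_ms, &out_value)
 *
 * They replace intel_de_wait_custom()'s combined fast_timeout_us /
 * slow_timeout_ms arguments with explicitly microsecond- or
 * millisecond-scoped variants.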
*/ - if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), - XELPDP_TBT_CLOCK_ACK, - XELPDP_TBT_CLOCK_ACK, - 100, 0, NULL)) - drm_warn(display->drm, - "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", + if (intel_de_wait_for_set_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), + XELPDP_TBT_CLOCK_ACK, 100)) + drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* @@ -3275,13 +3260,13 @@ static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) struct intel_display *display = to_intel_display(encoder); if (intel_encoder_is_c10phy(encoder)) - return CX0_P2PG_STATE_DISABLE; + return XELPDP_P2PG_STATE_DISABLE; if ((display->platform.battlemage && encoder->port == PORT_A) || (DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP)) - return CX0_P2PG_STATE_DISABLE; + return XELPDP_P2PG_STATE_DISABLE; - return CX0_P4PG_STATE_DISABLE; + return XELPDP_P4PG_STATE_DISABLE; } static void intel_cx0pll_disable(struct intel_encoder *encoder) @@ -3313,13 +3298,12 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) /* * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0". */ - if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), - intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | - intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0, - XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL)) - drm_warn(display->drm, - "Port %c PLL not unlocked after %dus.\n", - phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US); + if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), + intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | + intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), + XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US)) + drm_warn(display->drm, "Port %c PLL not unlocked\n", + phy_name(phy)); /* * 6. Follow the Display Voltage Frequency Switching Sequence After @@ -3345,7 +3329,7 @@ static bool intel_cx0_pll_is_enabled(struct intel_encoder *encoder) intel_cx0_get_pclk_pll_request(lane); } -static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) +void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); @@ -3362,10 +3346,9 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) XELPDP_TBT_CLOCK_REQUEST, 0); /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". 
*/ - if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), - XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL)) - drm_warn(display->drm, - "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", + if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), + XELPDP_TBT_CLOCK_ACK, 10)) + drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* @@ -3584,7 +3567,7 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state, struct intel_encoder *encoder; struct intel_cx0pll_state mpll_hw_state = {}; - if (DISPLAY_VER(display) < 14) + if (!IS_DISPLAY_VER(display, 14, 30)) return; if (!new_crtc_state->hw.active) diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h index c5a7b529955b..84d334b865f7 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h @@ -8,6 +8,9 @@ #include <linux/types.h> +#define MB_WRITE_COMMITTED true +#define MB_WRITE_UNCOMMITTED false + enum icl_port_dpll_id; struct intel_atomic_state; struct intel_c10pll_state; @@ -19,6 +22,8 @@ struct intel_display; struct intel_encoder; struct intel_hdmi; +void intel_clear_response_ready_flag(struct intel_encoder *encoder, + int lane); bool intel_encoder_is_c10phy(struct intel_encoder *encoder); void intel_mtl_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); @@ -41,9 +46,25 @@ bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a, const struct intel_cx0pll_state *b); void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, + u8 lane_mask, u8 state); +int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock); +void intel_cx0_setup_powerdown(struct intel_encoder *encoder); +bool intel_cx0_is_hdmi_frl(u32 clock); +u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr); +void intel_cx0_rmw(struct intel_encoder *encoder, + u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed); +void intel_cx0_write(struct intel_encoder *encoder, + u8 lane_mask, u16 addr, u8 data, bool committed); +int intel_cx0_wait_for_ack(struct intel_encoder *encoder, + int command, int lane, u32 *val); +void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane); int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder); void intel_cx0_pll_power_save_wa(struct intel_display *display); void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder); #endif /* __INTEL_CX0_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h index 77eae1d845f7..8df5cd5ce418 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h @@ -50,6 +50,7 @@ #define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1) #define XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2) #define XELPDP_PORT_M2P_COMMAND_READ REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3) +#define XELPDP_PORT_P2P_TRANSACTION_PENDING 
REG_BIT(24) #define XELPDP_PORT_M2P_DATA_MASK REG_GENMASK(23, 16) #define XELPDP_PORT_M2P_DATA(val) REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val) #define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15) @@ -73,14 +74,13 @@ #define XELPDP_PORT_P2M_DATA(val) REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val) #define XELPDP_PORT_P2M_ERROR_SET REG_BIT(15) -#define XELPDP_MSGBUS_TIMEOUT_SLOW 1 -#define XELPDP_MSGBUS_TIMEOUT_FAST_US 2 +#define XELPDP_MSGBUS_TIMEOUT_MS 1 #define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US 3200 #define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US 20 #define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US 100 #define XELPDP_PORT_RESET_START_TIMEOUT_US 5 -#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US 100 -#define XELPDP_PORT_RESET_END_TIMEOUT 15 +#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS 2 +#define XELPDP_PORT_RESET_END_TIMEOUT_MS 15 #define XELPDP_REFCLK_ENABLE_TIMEOUT_US 1 #define _XELPDP_PORT_BUF_CTL1_LN0_A 0x64004 @@ -104,6 +104,8 @@ #define XELPDP_PORT_BUF_PORT_DATA_20BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 1) #define XELPDP_PORT_BUF_PORT_DATA_40BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 2) #define XELPDP_PORT_REVERSAL REG_BIT(16) +#define XE3PLPDP_PHY_MODE_MASK REG_GENMASK(15, 12) +#define XE3PLPDP_PHY_MODE_DP REG_FIELD_PREP(XE3PLPDP_PHY_MODE_MASK, 0x3) #define XELPDP_PORT_BUF_IO_SELECT_TBT REG_BIT(11) #define XELPDP_PORT_BUF_PHY_IDLE REG_BIT(7) #define XELPDP_TC_PHY_OWNERSHIP REG_BIT(6) @@ -124,6 +126,7 @@ _XELPDP_PORT_BUF_CTL2(port)) #define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30)) #define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28)) +#define XE3PLPDP_LANE_PHY_PULSE_STATUS(lane) _PICK(lane, REG_BIT(27), REG_BIT(26)) #define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24)) #define _XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK REG_GENMASK(23, 20) #define _XELPDP_LANE0_POWERDOWN_NEW_STATE(val) REG_FIELD_PREP(_XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val) @@ -149,11 +152,12 @@ #define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val) #define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0) #define XELPDP_POWER_STATE_ACTIVE(val) REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val) -#define CX0_P0_STATE_ACTIVE 0x0 -#define CX0_P2_STATE_READY 0x2 -#define CX0_P2PG_STATE_DISABLE 0x9 -#define CX0_P4PG_STATE_DISABLE 0xC -#define CX0_P2_STATE_RESET 0x2 +#define XELPDP_P0_STATE_ACTIVE 0x0 +#define XELPDP_P2_STATE_READY 0x2 +#define XE3PLPD_P4_STATE_DISABLE 0x4 +#define XELPDP_P2PG_STATE_DISABLE 0x9 +#define XELPDP_P4PG_STATE_DISABLE 0xC +#define XELPDP_P2_STATE_RESET 0x2 #define _XELPDP_PORT_MSGBUS_TIMER_LN0_A 0x640d8 #define _XELPDP_PORT_MSGBUS_TIMER_LN0_B 0x641d8 @@ -298,10 +302,14 @@ #define PHY_C20_RD_DATA_L 0xC08 #define PHY_C20_RD_DATA_H 0xC09 #define PHY_C20_VDR_CUSTOM_SERDES_RATE 0xD00 -#define PHY_C20_VDR_HDMI_RATE 0xD01 +#define PHY_C20_IS_HDMI_FRL REG_BIT8(7) +#define PHY_C20_IS_DP REG_BIT8(6) +#define PHY_C20_DP_RATE_MASK REG_GENMASK8(4, 1) +#define PHY_C20_DP_RATE(val) REG_FIELD_PREP8(PHY_C20_DP_RATE_MASK, val) #define PHY_C20_CONTEXT_TOGGLE REG_BIT8(0) -#define PHY_C20_CUSTOM_SERDES_MASK REG_GENMASK8(4, 1) -#define PHY_C20_CUSTOM_SERDES(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_SERDES_MASK, val) +#define PHY_C20_VDR_HDMI_RATE 0xD01 +#define PHY_C20_HDMI_RATE_MASK REG_GENMASK8(1, 0) +#define PHY_C20_HDMI_RATE(val) REG_FIELD_PREP8(PHY_C20_HDMI_RATE_MASK, val) #define PHY_C20_VDR_CUSTOM_WIDTH 0xD02 #define PHY_C20_CUSTOM_WIDTH_MASK REG_GENMASK(1, 
0) #define PHY_C20_CUSTOM_WIDTH(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_WIDTH_MASK, val) diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.c b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c new file mode 100644 index 000000000000..8b8894c37f63 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <drm/drm_print.h> + +#include "intel_dbuf_bw.h" +#include "intel_display_core.h" +#include "intel_display_types.h" +#include "skl_watermark.h" + +struct intel_dbuf_bw { + unsigned int max_bw[I915_MAX_DBUF_SLICES]; + u8 active_planes[I915_MAX_DBUF_SLICES]; +}; + +struct intel_dbuf_bw_state { + struct intel_global_state base; + struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES]; +}; + +struct intel_dbuf_bw_state *to_intel_dbuf_bw_state(struct intel_global_state *obj_state) +{ + return container_of(obj_state, struct intel_dbuf_bw_state, base); +} + +struct intel_dbuf_bw_state * +intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + struct intel_global_state *dbuf_bw_state; + + dbuf_bw_state = intel_atomic_get_old_global_obj_state(state, &display->dbuf_bw.obj); + + return to_intel_dbuf_bw_state(dbuf_bw_state); +} + +struct intel_dbuf_bw_state * +intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + struct intel_global_state *dbuf_bw_state; + + dbuf_bw_state = intel_atomic_get_new_global_obj_state(state, &display->dbuf_bw.obj); + + return to_intel_dbuf_bw_state(dbuf_bw_state); +} + +struct intel_dbuf_bw_state * +intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + struct intel_global_state *dbuf_bw_state; + + dbuf_bw_state = intel_atomic_get_global_obj_state(state, &display->dbuf_bw.obj); + if (IS_ERR(dbuf_bw_state)) + return ERR_CAST(dbuf_bw_state); + + return to_intel_dbuf_bw_state(dbuf_bw_state); +} + +static bool intel_dbuf_bw_changed(struct intel_display *display, + const struct intel_dbuf_bw *old_dbuf_bw, + const struct intel_dbuf_bw *new_dbuf_bw) +{ + enum dbuf_slice slice; + + for_each_dbuf_slice(display, slice) { + if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] || + old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice]) + return true; + } + + return false; +} + +static bool intel_dbuf_bw_state_changed(struct intel_display *display, + const struct intel_dbuf_bw_state *old_dbuf_bw_state, + const struct intel_dbuf_bw_state *new_dbuf_bw_state) +{ + enum pipe pipe; + + for_each_pipe(display, pipe) { + const struct intel_dbuf_bw *old_dbuf_bw = + &old_dbuf_bw_state->dbuf_bw[pipe]; + const struct intel_dbuf_bw *new_dbuf_bw = + &new_dbuf_bw_state->dbuf_bw[pipe]; + + if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw)) + return true; + } + + return false; +} + +static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw, + struct intel_crtc *crtc, + enum plane_id plane_id, + const struct skl_ddb_entry *ddb, + unsigned int data_rate) +{ + struct intel_display *display = to_intel_display(crtc); + unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb); + enum dbuf_slice slice; + + /* + * The arbiter can only really guarantee an + * equal share of the total bw to each plane. 
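 *
 * For example (illustrative numbers only): a plane whose DDB
 * allocation spans slices 0 and 1 with data rate R raises both
 * max_bw[0] and max_bw[1] to at least R and sets its plane bit in
 * active_planes[] for both slices; intel_dbuf_bw_min_cdclk() below
 * turns these per-slice figures into a minimum CDCLK.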
+ */ + for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) { + dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate); + dbuf_bw->active_planes[slice] |= BIT(plane_id); + } +} + +static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum plane_id plane_id; + + memset(dbuf_bw, 0, sizeof(*dbuf_bw)); + + if (!crtc_state->hw.active) + return; + + for_each_plane_id_on_crtc(crtc, plane_id) { + /* + * We assume cursors are small enough + * to not cause bandwidth problems. + */ + if (plane_id == PLANE_CURSOR) + continue; + + skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id, + &crtc_state->wm.skl.plane_ddb[plane_id], + crtc_state->data_rate[plane_id]); + + if (DISPLAY_VER(display) < 11) + skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id, + &crtc_state->wm.skl.plane_ddb_y[plane_id], + crtc_state->data_rate[plane_id]); + } +} + +/* "Maximum Data Buffer Bandwidth" */ +int intel_dbuf_bw_min_cdclk(struct intel_display *display, + const struct intel_dbuf_bw_state *dbuf_bw_state) +{ + unsigned int total_max_bw = 0; + enum dbuf_slice slice; + + for_each_dbuf_slice(display, slice) { + int num_active_planes = 0; + unsigned int max_bw = 0; + enum pipe pipe; + + /* + * The arbiter can only really guarantee an + * equal share of the total bw to each plane. + */ + for_each_pipe(display, pipe) { + const struct intel_dbuf_bw *dbuf_bw = &dbuf_bw_state->dbuf_bw[pipe]; + + max_bw = max(dbuf_bw->max_bw[slice], max_bw); + num_active_planes += hweight8(dbuf_bw->active_planes[slice]); + } + max_bw *= num_active_planes; + + total_max_bw = max(total_max_bw, max_bw); + } + + return DIV_ROUND_UP(total_max_bw, 64); +} + +int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state, + bool *need_cdclk_calc) +{ + struct intel_display *display = to_intel_display(state); + struct intel_dbuf_bw_state *new_dbuf_bw_state = NULL; + const struct intel_dbuf_bw_state *old_dbuf_bw_state = NULL; + const struct intel_crtc_state *old_crtc_state; + const struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int ret, i; + + if (DISPLAY_VER(display) < 9) + return 0; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw; + + skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state); + skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state); + + if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw)) + continue; + + new_dbuf_bw_state = intel_atomic_get_dbuf_bw_state(state); + if (IS_ERR(new_dbuf_bw_state)) + return PTR_ERR(new_dbuf_bw_state); + + old_dbuf_bw_state = intel_atomic_get_old_dbuf_bw_state(state); + + new_dbuf_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw; + } + + if (!old_dbuf_bw_state) + return 0; + + if (intel_dbuf_bw_state_changed(display, old_dbuf_bw_state, new_dbuf_bw_state)) { + ret = intel_atomic_lock_global_state(&new_dbuf_bw_state->base); + if (ret) + return ret; + } + + ret = intel_cdclk_update_dbuf_bw_min_cdclk(state, + intel_dbuf_bw_min_cdclk(display, old_dbuf_bw_state), + intel_dbuf_bw_min_cdclk(display, new_dbuf_bw_state), + need_cdclk_calc); + if (ret) + return ret; + + return 0; +} + +void intel_dbuf_bw_update_hw_state(struct intel_display *display) +{ + struct intel_dbuf_bw_state *dbuf_bw_state = + to_intel_dbuf_bw_state(display->dbuf_bw.obj.state); + struct intel_crtc *crtc; + + if (DISPLAY_VER(display) < 9) + return; 
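/*
 * A worked example of the "Maximum Data Buffer Bandwidth" rule in
 * intel_dbuf_bw_min_cdclk() above, with made-up plane data rates
 * (same units as crtc_state->data_rate[]):
 *
 *   slice 0: 2 active planes, highest plane data rate 1000000
 *   slice 1: 1 active plane,  highest plane data rate  800000
 *
 *   slice 0 demand = 1000000 * 2 = 2000000
 *   slice 1 demand =  800000 * 1 =  800000
 *   total_max_bw   = max(2000000, 800000) = 2000000
 *   min cdclk      = DIV_ROUND_UP(2000000, 64) = 31250
 *
 * i.e. a DBUF slice is assumed to move 64 units of data per CDCLK
 * cycle, shared equally between the planes using that slice.
 */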
+ + for_each_intel_crtc(display->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + skl_crtc_calc_dbuf_bw(&dbuf_bw_state->dbuf_bw[crtc->pipe], crtc_state); + } +} + +void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(crtc); + struct intel_dbuf_bw_state *dbuf_bw_state = + to_intel_dbuf_bw_state(display->dbuf_bw.obj.state); + enum pipe pipe = crtc->pipe; + + if (DISPLAY_VER(display) < 9) + return; + + memset(&dbuf_bw_state->dbuf_bw[pipe], 0, sizeof(dbuf_bw_state->dbuf_bw[pipe])); +} + +static struct intel_global_state * +intel_dbuf_bw_duplicate_state(struct intel_global_obj *obj) +{ + struct intel_dbuf_bw_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + return &state->base; +} + +static void intel_dbuf_bw_destroy_state(struct intel_global_obj *obj, + struct intel_global_state *state) +{ + kfree(state); +} + +static const struct intel_global_state_funcs intel_dbuf_bw_funcs = { + .atomic_duplicate_state = intel_dbuf_bw_duplicate_state, + .atomic_destroy_state = intel_dbuf_bw_destroy_state, +}; + +int intel_dbuf_bw_init(struct intel_display *display) +{ + struct intel_dbuf_bw_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + intel_atomic_global_obj_init(display, &display->dbuf_bw.obj, + &state->base, &intel_dbuf_bw_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.h b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h new file mode 100644 index 000000000000..61875b9d5969 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef __INTEL_DBUF_BW_H__ +#define __INTEL_DBUF_BW_H__ + +#include <drm/drm_atomic.h> + +struct intel_atomic_state; +struct intel_dbuf_bw_state; +struct intel_crtc; +struct intel_display; +struct intel_global_state; + +struct intel_dbuf_bw_state * +to_intel_dbuf_bw_state(struct intel_global_state *obj_state); + +struct intel_dbuf_bw_state * +intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state); + +struct intel_dbuf_bw_state * +intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state); + +struct intel_dbuf_bw_state * +intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state); + +int intel_dbuf_bw_init(struct intel_display *display); +int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state, + bool *need_cdclk_calc); +int intel_dbuf_bw_min_cdclk(struct intel_display *display, + const struct intel_dbuf_bw_state *dbuf_bw_state); +void intel_dbuf_bw_update_hw_state(struct intel_display *display); +void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc); + +#endif /* __INTEL_DBUF_BW_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c09aa759f4d4..002ccd47856d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -35,7 +35,6 @@ #include <drm/drm_privacy_screen_consumer.h> #include "i915_reg.h" -#include "i915_utils.h" #include "icl_dsi.h" #include "intel_alpm.h" #include "intel_audio.h" @@ -53,6 +52,7 @@ #include "intel_display_power.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" #include "intel_dp.h" @@ -72,6 +72,7 @@ #include "intel_hotplug.h" 
#include "intel_hti.h" #include "intel_lspcon.h" +#include "intel_lt_phy.h" #include "intel_mg_phy_regs.h" #include "intel_modeset_lock.h" #include "intel_panel.h" @@ -209,8 +210,8 @@ void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port) } static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE); - if (intel_de_wait_for_set(display, intel_ddi_buf_status_reg(display, port), - DDI_BUF_IS_IDLE, 10)) + if (intel_de_wait_for_set_ms(display, intel_ddi_buf_status_reg(display, port), + DDI_BUF_IS_IDLE, 10)) drm_err(display->drm, "Timeout waiting for DDI BUF %c to get idle\n", port_name(port)); } @@ -234,8 +235,8 @@ static void intel_wait_ddi_buf_active(struct intel_encoder *encoder) } static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE); - if (intel_de_wait_for_clear(display, intel_ddi_buf_status_reg(display, port), - DDI_BUF_IS_IDLE, 10)) + if (intel_de_wait_for_clear_ms(display, intel_ddi_buf_status_reg(display, port), + DDI_BUF_IS_IDLE, 10)) drm_err(display->drm, "Timeout waiting for DDI BUF %c to get active\n", port_name(port)); } @@ -1466,10 +1467,15 @@ static int translate_signal_level(struct intel_dp *intel_dp, u8 signal_levels) { struct intel_display *display = to_intel_display(intel_dp); + const u8 *signal_array; + size_t array_size; int i; - for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) { - if (index_to_dp_signal_levels[i] == signal_levels) + signal_array = index_to_dp_signal_levels; + array_size = ARRAY_SIZE(index_to_dp_signal_levels); + + for (i = 0; i < array_size; i++) { + if (signal_array[i] == signal_levels) return i; } @@ -2301,8 +2307,8 @@ void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder, { struct intel_display *display = to_intel_display(encoder); - if (intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state), - DP_TP_STATUS_ACT_SENT, 1)) + if (intel_de_wait_for_set_ms(display, dp_tp_status_reg(encoder, crtc_state), + DP_TP_STATUS_ACT_SENT, 1)) drm_err(display->drm, "Timed out waiting for ACT sent\n"); } @@ -2377,11 +2383,11 @@ int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, return 0; if (enabled) - ret = intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state), - DP_TP_STATUS_FEC_ENABLE_LIVE, 1); + ret = intel_de_wait_for_set_ms(display, dp_tp_status_reg(encoder, crtc_state), + DP_TP_STATUS_FEC_ENABLE_LIVE, 1); else - ret = intel_de_wait_for_clear(display, dp_tp_status_reg(encoder, crtc_state), - DP_TP_STATUS_FEC_ENABLE_LIVE, 1); + ret = intel_de_wait_for_clear_ms(display, dp_tp_status_reg(encoder, crtc_state), + DP_TP_STATUS_FEC_ENABLE_LIVE, 1); if (ret) { drm_err(display->drm, @@ -2571,9 +2577,7 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder) intel_de_rmw(display, reg, 0, set_bits); - ret = intel_de_wait_custom(display, reg, - wait_bits, wait_bits, - 100, 0, NULL); + ret = intel_de_wait_for_set_us(display, reg, wait_bits, 100); if (ret) { drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n", port_name(port)); @@ -3073,9 +3077,7 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder) intel_de_rmw(display, reg, clr_bits, 0); - ret = intel_de_wait_custom(display, reg, - wait_bits, 0, - 100, 0, NULL); + ret = intel_de_wait_for_clear_us(display, reg, wait_bits, 100); if (ret) drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n", port_name(port)); @@ -3862,9 +3864,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp, if (port == PORT_A && DISPLAY_VER(display) < 12) return; - if 
(intel_de_wait_for_set(display, - dp_tp_status_reg(encoder, crtc_state), - DP_TP_STATUS_IDLE_DONE, 2)) + if (intel_de_wait_for_set_ms(display, + dp_tp_status_reg(encoder, crtc_state), + DP_TP_STATUS_IDLE_DONE, 2)) drm_err(display->drm, "Timed out waiting for DP idle patterns\n"); } @@ -4240,6 +4242,19 @@ void intel_ddi_get_clock(struct intel_encoder *encoder, &crtc_state->dpll_hw_state); } +static void xe3plpd_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_lt_phy_pll_readout_hw_state(encoder, crtc_state, &crtc_state->dpll_hw_state.ltpll); + + if (crtc_state->dpll_hw_state.ltpll.tbt_mode) + crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder); + else + crtc_state->port_clock = + intel_lt_phy_calc_port_clock(encoder, crtc_state); + intel_ddi_get_config(encoder, crtc_state); +} + static void mtl_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { @@ -4559,6 +4574,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder, struct intel_display *display = to_intel_display(encoder); struct drm_connector *connector = conn_state->connector; u8 port_sync_transcoders = 0; + int ret = 0; + + if (intel_crtc_has_dp_encoder(crtc_state)) + ret = intel_dp_compute_config_late(encoder, crtc_state, conn_state); + + if (ret) + return ret; drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n", encoder->base.base.id, encoder->base.name, @@ -5224,7 +5246,12 @@ void intel_ddi_init(struct intel_display *display, encoder->cloneable = 0; encoder->pipe_mask = ~0; - if (DISPLAY_VER(display) >= 14) { + if (HAS_LT_PHY(display)) { + encoder->enable_clock = intel_xe3plpd_pll_enable; + encoder->disable_clock = intel_xe3plpd_pll_disable; + encoder->port_pll_type = intel_mtl_port_pll_type; + encoder->get_config = xe3plpd_ddi_get_config; + } else if (DISPLAY_VER(display) >= 14) { encoder->enable_clock = intel_mtl_pll_enable; encoder->disable_clock = intel_mtl_pll_disable; encoder->port_pll_type = intel_mtl_port_pll_type; @@ -5289,7 +5316,9 @@ void intel_ddi_init(struct intel_display *display, encoder->get_config = hsw_ddi_get_config; } - if (DISPLAY_VER(display) >= 14) { + if (HAS_LT_PHY(display)) { + encoder->set_signal_levels = intel_lt_phy_set_signal_levels; + } else if (DISPLAY_VER(display) >= 14) { encoder->set_signal_levels = intel_cx0_phy_set_signal_levels; } else if (display->platform.dg2) { encoder->set_signal_levels = intel_snps_phy_set_signal_levels; diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index a238be5bc455..395dba8c9e4d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -3,13 +3,14 @@ * Copyright © 2020 Intel Corporation */ -#include "i915_utils.h" #include "intel_cx0_phy.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" +#include "intel_lt_phy.h" /* HDMI/DVI modes ignore everything but the last 2 items. 
So we share * them for both DP and FDI transports, allowing those ports to @@ -1115,6 +1116,69 @@ static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = { .num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr), }; +/* DP1.4 */ +static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_dp14[] = { + { .lt = { 1, 0, 0, 21, 0 } }, + { .lt = { 1, 1, 0, 24, 3 } }, + { .lt = { 1, 2, 0, 28, 7 } }, + { .lt = { 0, 3, 0, 35, 13 } }, + { .lt = { 1, 1, 0, 27, 0 } }, + { .lt = { 1, 2, 0, 31, 4 } }, + { .lt = { 0, 3, 0, 39, 9 } }, + { .lt = { 1, 2, 0, 35, 0 } }, + { .lt = { 0, 3, 0, 41, 7 } }, + { .lt = { 0, 3, 0, 48, 0 } }, +}; + +/* DP2.1 */ +static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_uhbr[] = { + { .lt = { 0, 0, 0, 48, 0 } }, + { .lt = { 0, 0, 0, 43, 5 } }, + { .lt = { 0, 0, 0, 40, 8 } }, + { .lt = { 0, 0, 0, 37, 11 } }, + { .lt = { 0, 0, 0, 33, 15 } }, + { .lt = { 0, 0, 2, 46, 0 } }, + { .lt = { 0, 0, 2, 42, 4 } }, + { .lt = { 0, 0, 2, 38, 8 } }, + { .lt = { 0, 0, 2, 35, 11 } }, + { .lt = { 0, 0, 2, 33, 13 } }, + { .lt = { 0, 0, 4, 44, 0 } }, + { .lt = { 0, 0, 4, 40, 4 } }, + { .lt = { 0, 0, 4, 37, 7 } }, + { .lt = { 0, 0, 4, 33, 11 } }, + { .lt = { 0, 0, 8, 40, 0 } }, + { .lt = { 1, 0, 2, 26, 2 } }, +}; + +/* eDp */ +static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_edp[] = { + { .lt = { 1, 0, 0, 12, 0 } }, + { .lt = { 1, 1, 0, 13, 1 } }, + { .lt = { 1, 2, 0, 15, 3 } }, + { .lt = { 1, 3, 0, 19, 7 } }, + { .lt = { 1, 1, 0, 14, 0 } }, + { .lt = { 1, 2, 0, 16, 2 } }, + { .lt = { 1, 3, 0, 21, 5 } }, + { .lt = { 1, 2, 0, 18, 0 } }, + { .lt = { 1, 3, 0, 22, 4 } }, + { .lt = { 1, 3, 0, 26, 0 } }, +}; + +static const struct intel_ddi_buf_trans xe3plpd_lt_trans_dp14 = { + .entries = _xe3plpd_lt_trans_dp14, + .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_dp14), +}; + +static const struct intel_ddi_buf_trans xe3plpd_lt_trans_uhbr = { + .entries = _xe3plpd_lt_trans_uhbr, + .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_uhbr), +}; + +static const struct intel_ddi_buf_trans xe3plpd_lt_trans_edp = { + .entries = _xe3plpd_lt_trans_edp, + .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_edp), +}; + bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table) { return table == &tgl_combo_phy_trans_edp_hbr2_hobl; @@ -1707,11 +1771,26 @@ mtl_get_c20_buf_trans(struct intel_encoder *encoder, return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries); } +static const struct intel_ddi_buf_trans * +xe3plpd_get_lt_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_dp_encoder(crtc_state) && intel_dp_is_uhbr(crtc_state)) + return intel_get_buf_trans(&xe3plpd_lt_trans_uhbr, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return intel_get_buf_trans(&xe3plpd_lt_trans_edp, n_entries); + else + return intel_get_buf_trans(&xe3plpd_lt_trans_dp14, n_entries); +} + void intel_ddi_buf_trans_init(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - if (DISPLAY_VER(display) >= 14) { + if (HAS_LT_PHY(display)) { + encoder->get_buf_trans = xe3plpd_get_lt_buf_trans; + } else if (DISPLAY_VER(display) >= 14) { if (intel_encoder_is_c10phy(encoder)) encoder->get_buf_trans = mtl_get_c10_buf_trans; else diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h index 29a190390192..cec332090a20 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h @@ 
-50,6 +50,14 @@ struct dg2_snps_phy_buf_trans { u8 post_cursor; }; +struct xe3plpd_lt_phy_buf_trans { + u8 txswing; + u8 txswing_level; + u8 pre_cursor; + u8 main_cursor; + u8 post_cursor; +}; + union intel_ddi_buf_trans_entry { struct hsw_ddi_buf_trans hsw; struct bxt_ddi_buf_trans bxt; @@ -57,6 +65,7 @@ union intel_ddi_buf_trans_entry { struct icl_mg_phy_ddi_buf_trans mg; struct tgl_dkl_phy_ddi_buf_trans dkl; struct dg2_snps_phy_buf_trans snps; + struct xe3plpd_lt_phy_buf_trans lt; }; struct intel_ddi_buf_trans { diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h index 9ecdcf6b73e4..a7ce3b875e06 100644 --- a/drivers/gpu/drm/i915/display/intel_de.h +++ b/drivers/gpu/drm/i915/display/intel_de.h @@ -84,20 +84,13 @@ intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val) } static inline u32 -__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg, - u32 clear, u32 set) -{ - return intel_uncore_rmw(__to_uncore(display), reg, clear, set); -} - -static inline u32 intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set) { u32 val; intel_dmc_wl_get(display, reg); - val = __intel_de_rmw_nowl(display, reg, clear, set); + val = intel_uncore_rmw(__to_uncore(display), reg, clear, set); intel_dmc_wl_put(display, reg); @@ -105,34 +98,16 @@ intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set) } static inline int -__intel_de_wait_for_register_nowl(struct intel_display *display, - i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout_ms) -{ - return intel_wait_for_register(__to_uncore(display), reg, mask, - value, timeout_ms); -} - -static inline int -__intel_de_wait_for_register_atomic_nowl(struct intel_display *display, - i915_reg_t reg, - u32 mask, u32 value, - unsigned int fast_timeout_us) -{ - return __intel_wait_for_register(__to_uncore(display), reg, mask, - value, fast_timeout_us, 0, NULL); -} - -static inline int -intel_de_wait(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout_ms) +intel_de_wait_us(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout_us, + u32 *out_value) { int ret; intel_dmc_wl_get(display, reg); - ret = __intel_de_wait_for_register_nowl(display, reg, mask, value, - timeout_ms); + ret = __intel_wait_for_register(__to_uncore(display), reg, mask, + value, timeout_us, 0, out_value); intel_dmc_wl_put(display, reg); @@ -140,15 +115,16 @@ intel_de_wait(struct intel_display *display, i915_reg_t reg, } static inline int -intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout_ms, u32 *out_value) +intel_de_wait_ms(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout_ms, + u32 *out_value) { int ret; intel_dmc_wl_get(display, reg); - ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask, - value, timeout_ms, out_value); + ret = __intel_wait_for_register(__to_uncore(display), reg, mask, + value, 2, timeout_ms, out_value); intel_dmc_wl_put(display, reg); @@ -156,36 +132,49 @@ intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, } static inline int -intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, - unsigned int fast_timeout_us, - unsigned int slow_timeout_ms, u32 *out_value) +intel_de_wait_fw_ms(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout_ms, + u32 *out_value) { - int ret; - - 
intel_dmc_wl_get(display, reg); + return __intel_wait_for_register_fw(__to_uncore(display), reg, mask, + value, 2, timeout_ms, out_value); +} - ret = __intel_wait_for_register(__to_uncore(display), reg, mask, - value, - fast_timeout_us, slow_timeout_ms, out_value); +static inline int +intel_de_wait_fw_us_atomic(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout_us, + u32 *out_value) +{ + return __intel_wait_for_register_fw(__to_uncore(display), reg, mask, + value, timeout_us, 0, out_value); +} - intel_dmc_wl_put(display, reg); +static inline int +intel_de_wait_for_set_us(struct intel_display *display, i915_reg_t reg, + u32 mask, unsigned int timeout_us) +{ + return intel_de_wait_us(display, reg, mask, mask, timeout_us, NULL); +} - return ret; +static inline int +intel_de_wait_for_clear_us(struct intel_display *display, i915_reg_t reg, + u32 mask, unsigned int timeout_us) +{ + return intel_de_wait_us(display, reg, mask, 0, timeout_us, NULL); } static inline int -intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg, - u32 mask, unsigned int timeout_ms) +intel_de_wait_for_set_ms(struct intel_display *display, i915_reg_t reg, + u32 mask, unsigned int timeout_ms) { - return intel_de_wait(display, reg, mask, mask, timeout_ms); + return intel_de_wait_ms(display, reg, mask, mask, timeout_ms, NULL); } static inline int -intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg, - u32 mask, unsigned int timeout_ms) +intel_de_wait_for_clear_ms(struct intel_display *display, i915_reg_t reg, + u32 mask, unsigned int timeout_ms) { - return intel_de_wait(display, reg, mask, 0, timeout_ms); + return intel_de_wait_ms(display, reg, mask, 0, timeout_ms, NULL); } /* @@ -215,6 +204,18 @@ intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val) } static inline u32 +intel_de_rmw_fw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set) +{ + u32 old, val; + + old = intel_de_read_fw(display, reg); + val = (old & ~clear) | set; + intel_de_write_fw(display, reg, val); + + return old; +} + +static inline u32 intel_de_read_notrace(struct intel_display *display, i915_reg_t reg) { return intel_uncore_read_notrace(__to_uncore(display), reg); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 5dca7f96b425..7b4fd18c60e2 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -41,6 +41,7 @@ #include <drm/drm_edid.h> #include <drm/drm_fixed.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> #include <drm/drm_vblank.h> @@ -51,7 +52,6 @@ #include "i915_config.h" #include "i915_drv.h" #include "i915_reg.h" -#include "i915_utils.h" #include "i9xx_plane.h" #include "i9xx_plane_regs.h" #include "i9xx_wm.h" @@ -60,6 +60,7 @@ #include "intel_audio.h" #include "intel_bo.h" #include "intel_bw.h" +#include "intel_casf.h" #include "intel_cdclk.h" #include "intel_clock_gating.h" #include "intel_color.h" @@ -76,6 +77,7 @@ #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_display_wa.h" #include "intel_dmc.h" #include "intel_dp.h" @@ -99,6 +101,7 @@ #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_link_bw.h" +#include "intel_lt_phy.h" #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_modeset_setup.h" @@ -129,11 +132,9 @@ #include 
"skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" -#include "vlv_dpio_phy_regs.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" #include "vlv_dsi_regs.h" -#include "vlv_sideband.h" static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); @@ -141,65 +142,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipe_misc(struct intel_dsb *dsb, const struct intel_crtc_state *crtc_state); -/* returns HPLL frequency in kHz */ -int vlv_get_hpll_vco(struct drm_device *drm) -{ - int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; - - /* Obtain SKU information */ - hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) & - CCK_FUSE_HPLL_FREQ_MASK; - - return vco_freq[hpll_freq] * 1000; -} - -int vlv_get_cck_clock(struct drm_device *drm, - const char *name, u32 reg, int ref_freq) -{ - u32 val; - int divider; - - val = vlv_cck_read(drm, reg); - divider = val & CCK_FREQUENCY_VALUES; - - drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) != - (divider << CCK_FREQUENCY_STATUS_SHIFT), - "%s change in progress\n", name); - - return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); -} - -int vlv_get_cck_clock_hpll(struct drm_device *drm, - const char *name, u32 reg) -{ - struct drm_i915_private *dev_priv = to_i915(drm); - int hpll; - - vlv_cck_get(drm); - - if (dev_priv->hpll_freq == 0) - dev_priv->hpll_freq = vlv_get_hpll_vco(drm); - - hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq); - - vlv_cck_put(drm); - - return hpll; -} - -void intel_update_czclk(struct intel_display *display) -{ - struct drm_i915_private *dev_priv = to_i915(display->drm); - - if (!display->platform.valleyview && !display->platform.cherryview) - return; - - dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk", - CCK_CZ_CLOCK_CONTROL); - - drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq); -} - static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) { return (crtc_state->active_planes & @@ -417,8 +359,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; /* Wait for the Pipe State to go off */ - if (intel_de_wait_for_clear(display, TRANSCONF(display, cpu_transcoder), - TRANSCONF_STATE_ENABLE, 100)) + if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, cpu_transcoder), + TRANSCONF_STATE_ENABLE, 100)) drm_WARN(display->drm, 1, "pipe_off wait timed out\n"); } else { intel_wait_for_pipe_scanline_stopped(crtc); @@ -605,16 +547,13 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) intel_wait_for_pipe_off(old_crtc_state); } -u32 intel_plane_fb_max_stride(struct drm_device *drm, - u32 pixel_format, u64 modifier) +u32 intel_plane_fb_max_stride(struct intel_display *display, + const struct drm_format_info *info, + u64 modifier) { - struct intel_display *display = to_intel_display(drm); struct intel_crtc *crtc; struct intel_plane *plane; - if (!HAS_DISPLAY(display)) - return 0; - /* * We assume the primary plane for pipe A has * the highest stride limits of them all, @@ -626,10 +565,23 @@ u32 intel_plane_fb_max_stride(struct drm_device *drm, plane = to_intel_plane(crtc->base.primary); - return plane->max_stride(plane, pixel_format, modifier, + return plane->max_stride(plane, info, modifier, DRM_MODE_ROTATE_0); } +u32 intel_dumb_fb_max_stride(struct drm_device *drm, + u32 pixel_format, u64 modifier) +{ + 
struct intel_display *display = to_intel_display(drm); + + if (!HAS_DISPLAY(display)) + return 0; + + return intel_plane_fb_max_stride(display, + drm_get_format_info(drm, pixel_format, modifier), + modifier); +} + void intel_set_plane_visible(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state, bool visible) @@ -891,9 +843,8 @@ static void intel_async_flip_vtd_wa(struct intel_display *display, static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - return crtc_state->uapi.async_flip && i915_vtd_active(i915) && + return crtc_state->uapi.async_flip && intel_display_vtd_active(display) && (DISPLAY_VER(display) == 9 || display->platform.broadwell || display->platform.haswell); } @@ -1040,6 +991,24 @@ static bool audio_disabling(const struct intel_crtc_state *old_crtc_state, memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); } +static bool intel_casf_enabling(const struct intel_crtc_state *new_crtc_state, + const struct intel_crtc_state *old_crtc_state) +{ + if (!new_crtc_state->hw.active) + return false; + + return is_enabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state); +} + +static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + if (!new_crtc_state->hw.active) + return false; + + return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state); +} + #undef is_disabling #undef is_enabling @@ -1195,6 +1164,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, if (audio_disabling(old_crtc_state, new_crtc_state)) intel_encoders_audio_disable(state, crtc); + if (intel_casf_disabling(old_crtc_state, new_crtc_state)) + intel_casf_disable(new_crtc_state); + intel_drrs_deactivate(old_crtc_state); if (hsw_ips_pre_update(state, crtc)) @@ -1642,8 +1614,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta } intel_set_transcoder_timings(crtc_state); - if (HAS_VRR(display)) - intel_vrr_set_transcoder_timings(crtc_state); + intel_vrr_set_transcoder_timings(crtc_state); if (cpu_transcoder != TRANSCODER_EDP) intel_de_write(display, TRANS_MULT(display, cpu_transcoder), @@ -2422,39 +2393,44 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) return 0; } -static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state) +static int intel_crtc_set_context_latency(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - int vblank_delay = 0; + int set_context_latency = 0; if (!HAS_DSB(display)) return 0; - vblank_delay = max(vblank_delay, intel_psr_min_vblank_delay(crtc_state)); + set_context_latency = max(set_context_latency, + intel_psr_min_set_context_latency(crtc_state)); - return vblank_delay; + return set_context_latency; } -static int intel_crtc_compute_vblank_delay(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static int intel_crtc_compute_set_context_latency(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - int vblank_delay, max_vblank_delay; + int set_context_latency, max_vblank_delay; + + set_context_latency = 
intel_crtc_set_context_latency(crtc_state); - vblank_delay = intel_crtc_vblank_delay(crtc_state); max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1; - if (vblank_delay > max_vblank_delay) { - drm_dbg_kms(display->drm, "[CRTC:%d:%s] vblank delay (%d) exceeds max (%d)\n", - crtc->base.base.id, crtc->base.name, vblank_delay, max_vblank_delay); + if (set_context_latency > max_vblank_delay) { + drm_dbg_kms(display->drm, "[CRTC:%d:%s] set context latency (%d) exceeds max (%d)\n", + crtc->base.base.id, crtc->base.name, + set_context_latency, + max_vblank_delay); return -EINVAL; } - adjusted_mode->crtc_vblank_start += vblank_delay; + crtc_state->set_context_latency = set_context_latency; + adjusted_mode->crtc_vblank_start += set_context_latency; return 0; } @@ -2466,11 +2442,11 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); int ret; - ret = intel_crtc_compute_vblank_delay(state, crtc); + ret = intel_dpll_crtc_compute_clock(state, crtc); if (ret) return ret; - ret = intel_dpll_crtc_compute_clock(state, crtc); + ret = intel_crtc_compute_set_context_latency(state, crtc); if (ret) return ret; @@ -2487,6 +2463,8 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state, if (crtc_state->has_pch_encoder) return ilk_fdi_compute_config(crtc, crtc_state); + intel_vrr_compute_guardband(crtc_state); + return 0; } @@ -2678,16 +2656,19 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta if (DISPLAY_VER(display) >= 13) { intel_de_write(display, TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder), - crtc_vblank_start - crtc_vdisplay); + crtc_state->set_context_latency); /* * VBLANK_START not used by hw, just clear it * to make it stand out in register dumps. */ crtc_vblank_start = 1; + } else if (DISPLAY_VER(display) == 12) { + /* VBLANK_START - VACTIVE defines SCL on TGL */ + crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency; } - if (DISPLAY_VER(display) >= 4) + if (DISPLAY_VER(display) >= 4 && DISPLAY_VER(display) < 35) intel_de_write(display, TRANS_VSYNCSHIFT(display, cpu_transcoder), vsyncshift); @@ -2768,13 +2749,16 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc if (DISPLAY_VER(display) >= 13) { intel_de_write(display, TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder), - crtc_vblank_start - crtc_vdisplay); + crtc_state->set_context_latency); /* * VBLANK_START not used by hw, just clear it * to make it stand out in register dumps. 
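 *
 * Worked example (made-up numbers): for a 1920x1080 mode needing a
 * set_context_latency of 2 lines, display version 13+ writes 2 to
 * TRANS_SET_CONTEXT_LATENCY and leaves VBLANK_START at 1 as done here,
 * whereas TGL (display version 12) has no dedicated SCL register and
 * instead programs VBLANK_START = 1080 + 2 = 1082, letting the
 * hardware derive SCL as VBLANK_START - VACTIVE.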
*/ crtc_vblank_start = 1; + } else if (DISPLAY_VER(display) == 12) { + /* VBLANK_START - VACTIVE defines SCL on TGL */ + crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency; } /* @@ -2825,7 +2809,7 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (DISPLAY_VER(display) == 2) + if (DISPLAY_VER(display) == 2 || DISPLAY_VER(display) >= 35) return false; if (DISPLAY_VER(display) >= 9 || @@ -2881,11 +2865,24 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc, adjusted_mode->crtc_vblank_end += 1; } - if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder)) - adjusted_mode->crtc_vblank_start = - adjusted_mode->crtc_vdisplay + + if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder)) { + pipe_config->set_context_latency = intel_de_read(display, TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder)); + adjusted_mode->crtc_vblank_start = + adjusted_mode->crtc_vdisplay + + pipe_config->set_context_latency; + } else if (DISPLAY_VER(display) == 12) { + /* + * TGL doesn't have a dedicated register for SCL. + * Instead, the hardware derives SCL from the difference between + * TRANS_VBLANK.vblank_start and TRANS_VTOTAL.vactive. + * To reflect the HW behaviour, readout the value for SCL as + * Vblank start - Vactive. + */ + pipe_config->set_context_latency = + adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay; + } if (DISPLAY_VER(display) >= 30) pipe_config->min_hblank = intel_de_read(display, @@ -3203,10 +3200,12 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) if (display->platform.haswell && crtc_state->dither) val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; - if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= TRANSCONF_INTERLACE_IF_ID_ILK; - else - val |= TRANSCONF_INTERLACE_PF_PD_ILK; + if (DISPLAY_VER(display) < 35) { + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) + val |= TRANSCONF_INTERLACE_IF_ID_ILK; + else + val |= TRANSCONF_INTERLACE_PF_PD_ILK; + } if (display->platform.haswell && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) @@ -3952,6 +3951,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, intel_joiner_get_config(pipe_config); intel_dsc_get_config(pipe_config); + /* intel_vrr_get_config() depends on .framestart_delay */ + if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { + tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder)); + + pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; + } else { + /* no idea if this is correct */ + pipe_config->framestart_delay = 1; + } + + /* + * intel_vrr_get_config() depends on TRANS_SET_CONTEXT_LATENCY + * readout done by intel_get_transcoder_timings(). 
+ */ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || DISPLAY_VER(display) >= 11) intel_get_transcoder_timings(crtc, pipe_config); @@ -4003,15 +4016,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = 1; } - if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { - tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder)); - - pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; - } else { - /* no idea if this is correct */ - pipe_config->framestart_delay = 1; - } - out: intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains); @@ -4258,9 +4262,14 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, return ret; } + ret = intel_casf_compute_config(crtc_state); + if (ret) + return ret; + if (DISPLAY_VER(display) >= 9) { if (intel_crtc_needs_modeset(crtc_state) || - intel_crtc_needs_fastset(crtc_state)) { + intel_crtc_needs_fastset(crtc_state) || + intel_casf_needs_scaler(crtc_state)) { ret = skl_update_scaler_crtc(crtc_state); if (ret) return ret; @@ -4639,7 +4648,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state, if (ret) return ret; - crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe); + crtc_state->dsc.compression_enabled_on_link = limits->link_dsc_pipes & BIT(crtc->pipe); crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe]; if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) { @@ -4760,8 +4769,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state, struct drm_connector *connector; int i; - intel_vrr_compute_config_late(crtc_state); - for_each_new_connector_in_state(&state->base, connector, conn_state, i) { struct intel_encoder *encoder = @@ -4996,9 +5003,33 @@ static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_s * Allow fastboot to fix up vblank delay (handled via LRR * codepaths), a bit dodgy as the registers aren't * double buffered but seems to be working more or less... + * + * Also allow this when the VRR timing generator is always on, + * and optimized guardband is used. In such cases, + * vblank delay may vary even without inherited state, but it's + * still safe as VRR guardband is still same. 
*/ - return HAS_LRR(display) && old_crtc_state->inherited && - !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI); + return HAS_LRR(display) && + (old_crtc_state->inherited || intel_vrr_always_use_vrr_tg(display)) && + !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI); +} + +static void +pipe_config_lt_phy_pll_mismatch(struct drm_printer *p, bool fastset, + const struct intel_crtc *crtc, + const char *name, + const struct intel_lt_phy_pll_state *a, + const struct intel_lt_phy_pll_state *b) +{ + struct intel_display *display = to_intel_display(crtc); + char *chipname = "LTPHY"; + + pipe_config_mismatch(p, fastset, crtc, name, chipname); + + drm_printf(p, "expected:\n"); + intel_lt_phy_dump_hw_state(display, a); + drm_printf(p, "found:\n"); + intel_lt_phy_dump_hw_state(display, b); } bool @@ -5125,6 +5156,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } \ } while (0) +#define PIPE_CONF_CHECK_PLL_LT(name) do { \ + if (!intel_lt_phy_pll_compare_hw_state(¤t_config->name, \ + &pipe_config->name)) { \ + pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, __stringify(name), \ + ¤t_config->name, \ + &pipe_config->name); \ + ret = false; \ + } \ +} while (0) + #define PIPE_CONF_CHECK_TIMINGS(name) do { \ PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ PIPE_CONF_CHECK_I(name.crtc_htotal); \ @@ -5319,6 +5360,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(scaler_state.scaler_id); PIPE_CONF_CHECK_I(pixel_rate); + PIPE_CONF_CHECK_BOOL(hw.casf_params.casf_enable); + PIPE_CONF_CHECK_I(hw.casf_params.win_size); + PIPE_CONF_CHECK_I(hw.casf_params.strength); PIPE_CONF_CHECK_X(gamma_mode); if (display->platform.cherryview) @@ -5349,7 +5393,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_PLL(dpll_hw_state); /* FIXME convert MTL+ platforms over to dpll_mgr */ - if (DISPLAY_VER(display) >= 14) + if (HAS_LT_PHY(display)) + PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll); + else if (DISPLAY_VER(display) >= 14) PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll); PIPE_CONF_CHECK_X(dsi_pll.ctrl); @@ -5443,6 +5489,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(vrr.guardband); } + PIPE_CONF_CHECK_I(set_context_latency); + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_LLI @@ -5689,6 +5737,23 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) return 0; } +u8 intel_calc_enabled_pipes(struct intel_atomic_state *state, + u8 enabled_pipes) +{ + const struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + if (crtc_state->hw.enable) + enabled_pipes |= BIT(crtc->pipe); + else + enabled_pipes &= ~BIT(crtc->pipe); + } + + return enabled_pipes; +} + u8 intel_calc_active_pipes(struct intel_atomic_state *state, u8 active_pipes) { @@ -5718,12 +5783,16 @@ static int intel_modeset_checks(struct intel_atomic_state *state) return 0; } -static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode, - const struct drm_display_mode *new_adjusted_mode) +static bool lrr_params_changed(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) { + const struct drm_display_mode *old_adjusted_mode = &old_crtc_state->hw.adjusted_mode; + const struct drm_display_mode *new_adjusted_mode = &new_crtc_state->hw.adjusted_mode; + return old_adjusted_mode->crtc_vblank_start != 
new_adjusted_mode->crtc_vblank_start || old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end || - old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal; + old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal || + old_crtc_state->set_context_latency != new_crtc_state->set_context_latency; } static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, @@ -5749,8 +5818,7 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta &new_crtc_state->dp_m_n)) new_crtc_state->update_m_n = false; - if (!lrr_params_changed(&old_crtc_state->hw.adjusted_mode, - &new_crtc_state->hw.adjusted_mode)) + if (!lrr_params_changed(old_crtc_state, new_crtc_state)) new_crtc_state->update_lrr = false; if (intel_crtc_needs_modeset(new_crtc_state)) @@ -5964,6 +6032,14 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state, return -EINVAL; } + /* FIXME: selective fetch should be disabled for async flips */ + if (new_crtc_state->enable_psr2_sel_fetch) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] async flip disallowed with PSR2 selective fetch\n", + crtc->base.base.id, crtc->base.name); + return -EINVAL; + } + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { if (plane->pipe != crtc->pipe) @@ -6342,7 +6418,6 @@ int intel_atomic_check(struct drm_device *dev, struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_crtc *crtc; int ret, i; - bool any_ms = false; if (!intel_display_driver_check_access(display)) return -ENODEV; @@ -6450,14 +6525,11 @@ int intel_atomic_check(struct drm_device *dev, if (!intel_crtc_needs_modeset(new_crtc_state)) continue; - any_ms = true; - intel_dpll_release(state, crtc); } - if (any_ms && !check_digital_port_conflicts(state)) { - drm_dbg_kms(display->drm, - "rejecting conflicting digital port configuration\n"); + if (intel_any_crtc_needs_modeset(state) && !check_digital_port_conflicts(state)) { + drm_dbg_kms(display->drm, "rejecting conflicting digital port configuration\n"); ret = -EINVAL; goto fail; } @@ -6466,29 +6538,25 @@ int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + new_crtc_state->min_cdclk = intel_crtc_min_cdclk(new_crtc_state); + ret = intel_compute_global_watermarks(state); if (ret) goto fail; - ret = intel_bw_atomic_check(state, any_ms); + ret = intel_bw_atomic_check(state); if (ret) goto fail; - ret = intel_cdclk_atomic_check(state, &any_ms); + ret = intel_cdclk_atomic_check(state); if (ret) goto fail; - if (intel_any_crtc_needs_modeset(state)) - any_ms = true; - - if (any_ms) { + if (intel_any_crtc_needs_modeset(state)) { ret = intel_modeset_checks(state); if (ret) goto fail; - - ret = intel_modeset_calc_cdclk(state); - if (ret) - return ret; } ret = intel_pmdemand_atomic_check(state); @@ -6739,6 +6807,11 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state, intel_vrr_set_transcoder_timings(new_crtc_state); } + if (intel_casf_enabling(new_crtc_state, old_crtc_state)) + intel_casf_enable(new_crtc_state); + else if (new_crtc_state->hw.casf_params.strength != old_crtc_state->hw.casf_params.strength) + intel_casf_update_strength(new_crtc_state); + intel_fbc_update(state, crtc); drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF)); @@ -7307,7 +7380,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state, intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1); 
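
The PIPE_CONF_CHECK_* additions above (the CASF params, the LT PHY PLL, vrr.guardband, set_context_latency) all lean on the same state-checker idiom: a do-while(0) macro that compares one field of the computed software state against the hardware readout, stringifies the field name for the log, and latches failure instead of returning early. The standalone sketch below is illustrative only and not taken from the patch; the struct and field names are invented, and plain printf() stands in for the drm printer.

/* Miniature version of the PIPE_CONF_CHECK_* technique (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

struct pipe_state {
	int pixel_rate;
	int guardband;
	bool casf_enable;
};

#define CHECK_FIELD(sw, hw, field, ok) do {				\
	if ((sw)->field != (hw)->field) {				\
		printf("mismatch in %s (expected %d, found %d)\n",	\
		       #field, (int)(sw)->field, (int)(hw)->field);	\
		*(ok) = false;						\
	}								\
} while (0)

int main(void)
{
	struct pipe_state sw = { .pixel_rate = 148500, .guardband = 2, .casf_enable = true };
	struct pipe_state hw = { .pixel_rate = 148500, .guardband = 1, .casf_enable = false };
	bool ok = true;

	CHECK_FIELD(&sw, &hw, pixel_rate, &ok);
	CHECK_FIELD(&sw, &hw, guardband, &ok);
	CHECK_FIELD(&sw, &hw, casf_enable, &ok);

	return ok ? 0 : 1;
}

Because the macro latches the result rather than returning on the first mismatch, a single verification pass reports every differing field, which is what makes the state-checker dumps readable.
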
intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state); - intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit); + intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit); intel_vrr_check_push_sent(new_crtc_state->dsb_commit, new_crtc_state); intel_dsb_interrupt(new_crtc_state->dsb_commit); @@ -7397,13 +7470,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) */ intel_pmdemand_pre_plane_update(state); - if (state->modeset) { + if (state->modeset) drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base); - intel_set_cdclk_pre_plane_update(state); + intel_set_cdclk_pre_plane_update(state); + if (state->modeset) intel_modeset_verify_disabled(state); - } intel_sagv_pre_plane_update(state); @@ -7516,8 +7589,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_verify_planes(state); intel_sagv_post_plane_update(state); - if (state->modeset) - intel_set_cdclk_post_plane_update(state); + intel_set_cdclk_post_plane_update(state); intel_pmdemand_post_plane_update(state); drm_atomic_helper_commit_hw_done(&state->base); @@ -8003,6 +8075,14 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev, mode->vtotal > vtotal_max) return MODE_V_ILLEGAL; + /* + * WM_LINETIME only goes up to (almost) 64 usec, and also + * knowing that the linetime is always bounded will ease the + * mind during various calculations. + */ + if (DIV_ROUND_UP(mode->htotal * 1000, mode->clock) > 64) + return MODE_H_ILLEGAL; + return MODE_OK; } @@ -8327,7 +8407,5 @@ void i830_disable_pipe(struct intel_display *display, enum pipe pipe) bool intel_scanout_needs_vtd_wa(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - return IS_DISPLAY_VER(display, 6, 11) && i915_vtd_active(i915); + return IS_DISPLAY_VER(display, 6, 11) && intel_display_vtd_active(display); } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 37e2ab301a80..bcc6ccb69d2b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -34,6 +34,7 @@ struct drm_atomic_state; struct drm_device; struct drm_display_mode; struct drm_encoder; +struct drm_format_info; struct drm_modeset_acquire_ctx; struct intel_atomic_state; struct intel_crtc; @@ -394,14 +395,19 @@ enum phy_fia { i) int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); +u8 intel_calc_enabled_pipes(struct intel_atomic_state *state, + u8 enabled_pipes); u8 intel_calc_active_pipes(struct intel_atomic_state *state, u8 active_pipes); void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, int bw_overhead, struct intel_link_m_n *m_n); -u32 intel_plane_fb_max_stride(struct drm_device *drm, - u32 pixel_format, u64 modifier); +u32 intel_plane_fb_max_stride(struct intel_display *display, + const struct drm_format_info *info, + u64 modifier); +u32 intel_dumb_fb_max_stride(struct drm_device *drm, + u32 pixel_format, u64 modifier); enum drm_mode_status intel_mode_valid_max_plane_size(struct intel_display *display, const struct drm_display_mode *mode, @@ -435,11 +441,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state); void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state); void i830_enable_pipe(struct intel_display *display, enum pipe pipe); void i830_disable_pipe(struct intel_display *display, enum pipe pipe); -int vlv_get_hpll_vco(struct drm_device *drm); -int 
vlv_get_cck_clock(struct drm_device *drm, - const char *name, u32 reg, int ref_freq); -int vlv_get_cck_clock_hpll(struct drm_device *drm, - const char *name, u32 reg); bool intel_has_pending_fb_unpin(struct intel_display *display); void intel_encoder_destroy(struct drm_encoder *encoder); struct drm_display_mode * @@ -528,7 +529,6 @@ void intel_init_display_hooks(struct intel_display *display); void intel_setup_outputs(struct intel_display *display); int intel_initial_commit(struct intel_display *display); void intel_panel_sanitize_ssc(struct intel_display *display); -void intel_update_czclk(struct intel_display *display); enum drm_mode_status intel_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode); int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c index d56065f22655..9a47aa38cf82 100644 --- a/drivers/gpu/drm/i915/display/intel_display_conversion.c +++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c @@ -1,15 +1,21 @@ // SPDX-License-Identifier: MIT /* Copyright © 2024 Intel Corporation */ -#include "i915_drv.h" -#include "intel_display_conversion.h" +#include <drm/intel/display_member.h> -static struct intel_display *__i915_to_display(struct drm_i915_private *i915) -{ - return i915->display; -} +#include "intel_display_conversion.h" struct intel_display *__drm_to_display(struct drm_device *drm) { - return __i915_to_display(to_i915(drm)); + /* + * Note: This relies on both struct drm_i915_private and struct + * xe_device having the struct drm_device and struct intel_display * + * members at the same relative offsets, as defined by struct + * __intel_generic_device. + * + * See also INTEL_DISPLAY_MEMBER_STATIC_ASSERT(). + */ + struct __intel_generic_device *d = container_of(drm, struct __intel_generic_device, drm); + + return d->display; } diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 8c226406c5cd..9b8414b77c15 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -41,6 +41,7 @@ struct intel_cdclk_vals; struct intel_color_funcs; struct intel_crtc; struct intel_crtc_state; +struct intel_display_parent_interface; struct intel_dmc; struct intel_dpll_global_funcs; struct intel_dpll_mgr; @@ -141,14 +142,13 @@ struct intel_dpll_global { }; struct intel_frontbuffer_tracking { + /* protects busy_bits */ spinlock_t lock; /* - * Tracking bits for delayed frontbuffer flushing du to gpu activity or - * scheduled flips. + * Tracking bits for delayed frontbuffer flushing due to gpu activity. */ unsigned busy_bits; - unsigned flip_bits; }; struct intel_hotplug { @@ -291,6 +291,9 @@ struct intel_display { /* Intel PCH: where the south display engine lives */ enum intel_pch pch_type; + /* Parent, or core, driver functions exposed to display */ + const struct intel_display_parent_interface *parent; + /* Display functions */ struct { /* Top level crtc-ish functions */ @@ -370,6 +373,10 @@ struct intel_display { } dbuf; struct { + struct intel_global_obj obj; + } dbuf_bw; + + struct { /* * dkl.phy_lock protects against concurrent access of the * Dekel TypeC PHYs. @@ -475,7 +482,21 @@ struct intel_display { struct work_struct vblank_notify_work; - u32 de_irq_mask[I915_MAX_PIPES]; + /* + * Cached value of VLV/CHV IMR to avoid reads in updating the + * bitfield. 
+ */ + u32 vlv_imr_mask; + /* + * Cached value of gen 5-7 DE IMR to avoid reads in updating the + * bitfield. + */ + u32 ilk_de_imr_mask; + /* + * Cached value of BDW+ DE pipe IMR to avoid reads in updating + * the bitfield. + */ + u32 de_pipe_imr_mask[I915_MAX_PIPES]; u32 pipestat_irq_mask[I915_MAX_PIPES]; } irq; @@ -568,6 +589,11 @@ struct intel_display { } state; struct { + unsigned int hpll_freq; + unsigned int czclk_freq; + } vlv_clock; + + struct { /* ordered wq for modesets */ struct workqueue_struct *modeset; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 10dddec3796f..9bbfdae8d024 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -12,6 +12,7 @@ #include <drm/drm_edid.h> #include <drm/drm_file.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include "hsw_ips.h" #include "i915_reg.h" @@ -47,6 +48,7 @@ #include "intel_psr_regs.h" #include "intel_vdsc.h" #include "intel_wm.h" +#include "intel_tc.h" static struct intel_display *node_to_intel_display(struct drm_info_node *node) { @@ -76,9 +78,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) seq_printf(m, "FB tracking busy bits: 0x%08x\n", display->fb_tracking.busy_bits); - seq_printf(m, "FB tracking flip bits: 0x%08x\n", - display->fb_tracking.flip_bits); - spin_unlock(&display->fb_tracking.lock); return 0; @@ -246,6 +245,8 @@ static void intel_connector_info(struct seq_file *m, { struct intel_connector *intel_connector = to_intel_connector(connector); const struct drm_display_mode *mode; + struct drm_printer p = drm_seq_file_printer(m); + struct intel_digital_port *dig_port = NULL; seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", connector->base.id, connector->name, @@ -268,14 +269,19 @@ static void intel_connector_info(struct seq_file *m, intel_dp_mst_info(m, intel_connector); else intel_dp_info(m, intel_connector); + dig_port = dp_to_dig_port(intel_attached_dp(intel_connector)); break; case DRM_MODE_CONNECTOR_HDMIA: intel_hdmi_info(m, intel_connector); + dig_port = hdmi_to_dig_port(intel_attached_hdmi(intel_connector)); break; default: break; } + if (dig_port != NULL && intel_encoder_is_tc(&dig_port->base)) + intel_tc_info(&p, dig_port); + intel_hdcp_info(m, intel_connector); seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc); diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index a002bc6ce7b0..1170afaa8680 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -1404,8 +1404,20 @@ static const struct platform_desc bmg_desc = { PLATFORM_GROUP(dgfx), }; +static const u16 wcl_ids[] = { + INTEL_WCL_IDS(ID), + 0 +}; + static const struct platform_desc ptl_desc = { PLATFORM(pantherlake), + .subplatforms = (const struct subplatform_desc[]) { + { + SUBPLATFORM(pantherlake, wildcatlake), + .pciidlist = wcl_ids, + }, + {}, + } }; __diag_pop(); @@ -1482,6 +1494,7 @@ static const struct { INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc), INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc), INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc), + INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc), }; static const struct { @@ -1494,6 +1507,7 @@ static const struct { { 20, 0, &xe2_lpd_display }, { 30, 0, &xe2_lpd_display }, { 30, 2, &wcl_display }, + { 35, 0, &xe2_lpd_display }, }; static const struct 
intel_display_device_info * @@ -1634,7 +1648,8 @@ static void display_platforms_or(struct intel_display_platforms *dst, bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits()); } -struct intel_display *intel_display_device_probe(struct pci_dev *pdev) +struct intel_display *intel_display_device_probe(struct pci_dev *pdev, + const struct intel_display_parent_interface *parent) { struct intel_display *display; const struct intel_display_device_info *info; @@ -1650,6 +1665,8 @@ struct intel_display *intel_display_device_probe(struct pci_dev *pdev) /* Add drm device backpointer as early as possible. */ display->drm = pci_get_drvdata(pdev); + display->parent = parent; + intel_display_params_copy(&display->params); if (has_no_display(pdev)) { diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index f329f1beafef..b559ef43d547 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -13,6 +13,7 @@ struct drm_printer; struct intel_display; +struct intel_display_parent_interface; struct pci_dev; /* @@ -101,7 +102,9 @@ struct pci_dev; /* Display ver 14.1 (based on GMD ID) */ \ func(battlemage) \ /* Display ver 30 (based on GMD ID) */ \ - func(pantherlake) + func(pantherlake) \ + func(pantherlake_wildcatlake) + #define __MEMBER(name) unsigned long name:1; #define __COUNT(x) 1 + @@ -140,10 +143,13 @@ struct intel_display_platforms { func(overlay_needs_physical); \ func(supports_tv); +#define HAS_128B_Y_TILING(__display) (!(__display)->platform.i915g && !(__display)->platform.i915gm) #define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14) #define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5) #define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13) +#define HAS_AUX_CCS(__display) (IS_DISPLAY_VER(__display, 9, 12) || (__display)->platform.alderlake_p || (__display)->platform.meteorlake) #define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display)) +#define HAS_CASF(__display) (DISPLAY_VER(__display) >= 20) #define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl) #define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash) #define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20) @@ -155,7 +161,7 @@ struct intel_display_platforms { #define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0) #define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc) #define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20) -#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell) +#define HAS_DOUBLE_BUFFERED_M_N(__display) (IS_DISPLAY_VER((__display), 9, 14) || (__display)->platform.broadwell) #define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30) #define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4) #define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14) @@ -308,7 +314,8 @@ struct intel_display_device_info { bool intel_display_device_present(struct intel_display *display); bool intel_display_device_enabled(struct intel_display *display); -struct intel_display *intel_display_device_probe(struct pci_dev *pdev); +struct intel_display *intel_display_device_probe(struct pci_dev *pdev, + const struct intel_display_parent_interface *parent); void intel_display_device_remove(struct intel_display *display); void 
intel_display_device_info_runtime_init(struct intel_display *display); diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index cf1c14412abe..7e000ba3e08b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -14,11 +14,12 @@ #include <drm/drm_client_event.h> #include <drm/drm_mode_config.h> #include <drm/drm_privacy_screen_consumer.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "i915_drv.h" -#include "i915_utils.h" +#include "i915_utils.h" /* for i915_inject_probe_failure() */ #include "i9xx_wm.h" #include "intel_acpi.h" #include "intel_atomic.h" @@ -28,12 +29,15 @@ #include "intel_cdclk.h" #include "intel_color.h" #include "intel_crtc.h" +#include "intel_cursor.h" +#include "intel_dbuf_bw.h" #include "intel_display_core.h" #include "intel_display_debugfs.h" #include "intel_display_driver.h" #include "intel_display_irq.h" #include "intel_display_power.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_display_wa.h" #include "intel_dkl_phy.h" #include "intel_dmc.h" @@ -145,17 +149,7 @@ static void intel_mode_config_init(struct intel_display *display) mode_config->max_height = 2048; } - if (display->platform.i845g || display->platform.i865g) { - mode_config->cursor_width = display->platform.i845g ? 64 : 512; - mode_config->cursor_height = 1023; - } else if (display->platform.i830 || display->platform.i85x || - display->platform.i915g || display->platform.i915gm) { - mode_config->cursor_width = 64; - mode_config->cursor_height = 64; - } else { - mode_config->cursor_width = 256; - mode_config->cursor_height = 256; - } + intel_cursor_mode_config_init(display); } static void intel_mode_config_cleanup(struct intel_display *display) @@ -285,6 +279,10 @@ int intel_display_driver_probe_noirq(struct intel_display *display) if (ret) goto cleanup_wq_unordered; + ret = intel_dbuf_bw_init(display); + if (ret) + goto cleanup_wq_unordered; + ret = intel_bw_init(display); if (ret) goto cleanup_wq_unordered; @@ -482,7 +480,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display) intel_dpll_init(display); intel_fdi_pll_freq_update(display); - intel_update_czclk(display); intel_display_driver_init_hw(display); intel_dpll_update_ref_clks(display); diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 123e054affbe..43b27deb4a26 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -3,6 +3,7 @@ * Copyright © 2023 Intel Corporation */ +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "i915_drv.h" @@ -140,14 +141,14 @@ void ilk_update_display_irq(struct intel_display *display, lockdep_assert_held(&display->irq.lock); drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); - new_val = dev_priv->irq_mask; + new_val = display->irq.ilk_de_imr_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != dev_priv->irq_mask && + if (new_val != display->irq.ilk_de_imr_mask && !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) { - dev_priv->irq_mask = new_val; - intel_de_write(display, DEIMR, dev_priv->irq_mask); + display->irq.ilk_de_imr_mask = new_val; + intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask); intel_de_posting_read(display, DEIMR); } } @@ -215,13 +216,13 @@ 
static void bdw_update_pipe_irq(struct intel_display *display, if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) return; - new_val = display->irq.de_irq_mask[pipe]; + new_val = display->irq.de_pipe_imr_mask[pipe]; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != display->irq.de_irq_mask[pipe]) { - display->irq.de_irq_mask[pipe] = new_val; - intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]); + if (new_val != display->irq.de_pipe_imr_mask[pipe]) { + display->irq.de_pipe_imr_mask[pipe] = new_val; + intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_pipe_imr_mask[pipe]); intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe)); } } @@ -872,7 +873,7 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display) } } -void ilk_display_irq_handler(struct intel_display *display, u32 de_iir) +static void _ilk_display_irq_handler(struct intel_display *display, u32 de_iir) { enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; @@ -923,7 +924,7 @@ void ilk_display_irq_handler(struct intel_display *display, u32 de_iir) ilk_display_rps_irq_handler(display); } -void ivb_display_irq_handler(struct intel_display *display, u32 de_iir) +static void _ivb_display_irq_handler(struct intel_display *display, u32 de_iir) { enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; @@ -972,6 +973,53 @@ void ivb_display_irq_handler(struct intel_display *display, u32 de_iir) } } +void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier) +{ + /* disable master interrupt before clearing iir */ + *de_ier = intel_de_read_fw(display, DEIER); + intel_de_write_fw(display, DEIER, *de_ier & ~DE_MASTER_IRQ_CONTROL); + + /* + * Disable south interrupts. We'll only write to SDEIIR once, so further + * interrupts will be stored on its back queue, and then we'll be able + * to process them after we restore SDEIER (as soon as we restore it, + * we'll get an interrupt if SDEIIR still has something to process due + * to its back queue). 
+ */ + if (!HAS_PCH_NOP(display)) { + *sde_ier = intel_de_read_fw(display, SDEIER); + intel_de_write_fw(display, SDEIER, 0); + } else { + *sde_ier = 0; + } +} + +void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier) +{ + intel_de_write_fw(display, DEIER, de_ier); + + if (sde_ier) + intel_de_write_fw(display, SDEIER, sde_ier); +} + +bool ilk_display_irq_handler(struct intel_display *display) +{ + u32 de_iir; + bool handled = false; + + de_iir = intel_de_read_fw(display, DEIIR); + if (de_iir) { + intel_de_write_fw(display, DEIIR, de_iir); + if (DISPLAY_VER(display) >= 7) + _ivb_display_irq_handler(display, de_iir); + else + _ilk_display_irq_handler(display, de_iir); + handled = true; + } + + return handled; +} + static u32 gen8_de_port_aux_mask(struct intel_display *display) { u32 mask; @@ -1865,8 +1913,6 @@ void vlv_display_error_irq_handler(struct intel_display *display, static void _vlv_display_irq_reset(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (display->platform.cherryview) intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else @@ -1881,7 +1927,7 @@ static void _vlv_display_irq_reset(struct intel_display *display) i9xx_pipestat_irq_reset(display); intel_display_irq_regs_reset(display, VLV_IRQ_REGS); - dev_priv->irq_mask = ~0u; + display->irq.vlv_imr_mask = ~0u; } void vlv_display_irq_reset(struct intel_display *display) @@ -1902,6 +1948,22 @@ void i9xx_display_irq_reset(struct intel_display *display) i9xx_pipestat_irq_reset(display); } +u32 i9xx_display_irq_enable_mask(struct intel_display *display) +{ + u32 enable_mask; + + enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + + if (DISPLAY_VER(display) >= 3) + enable_mask |= I915_ASLE_INTERRUPT; + + if (HAS_HOTPLUG(display)) + enable_mask |= I915_DISPLAY_PORT_INTERRUPT; + + return enable_mask; +} + void i915_display_irq_postinstall(struct intel_display *display) { /* @@ -1939,7 +2001,6 @@ static u32 vlv_error_mask(void) static void _vlv_display_irq_postinstall(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 pipestat_mask; u32 enable_mask; enum pipe pipe; @@ -1973,11 +2034,11 @@ static void _vlv_display_irq_postinstall(struct intel_display *display) enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | I915_LPE_PIPE_C_INTERRUPT; - drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u); + drm_WARN_ON(display->drm, display->irq.vlv_imr_mask != ~0u); - dev_priv->irq_mask = ~enable_mask; + display->irq.vlv_imr_mask = ~enable_mask; - intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask); + intel_display_irq_regs_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask); } void vlv_display_irq_postinstall(struct intel_display *display) @@ -1988,7 +2049,7 @@ void vlv_display_irq_postinstall(struct intel_display *display) spin_unlock_irq(&display->irq.lock); } -void ibx_display_irq_reset(struct intel_display *display) +static void ibx_display_irq_reset(struct intel_display *display) { if (HAS_PCH_NOP(display)) return; @@ -1999,6 +2060,24 @@ void ibx_display_irq_reset(struct intel_display *display) intel_de_write(display, SERR_INT, 0xffffffff); } +void ilk_display_irq_reset(struct intel_display *display) +{ + struct intel_uncore *uncore = to_intel_uncore(display->drm); + + gen2_irq_reset(uncore, DE_IRQ_REGS); + display->irq.ilk_de_imr_mask = ~0u; + + if (DISPLAY_VER(display) == 7) + intel_de_write(display, 
GEN7_ERR_INT, 0xffffffff); + + if (display->platform.haswell) { + intel_de_write(display, EDP_PSR_IMR, 0xffffffff); + intel_de_write(display, EDP_PSR_IIR, 0xffffffff); + } + + ibx_display_irq_reset(display); +} + void gen8_display_irq_reset(struct intel_display *display) { enum pipe pipe; @@ -2088,8 +2167,8 @@ void gen8_irq_power_well_post_enable(struct intel_display *display, for_each_pipe_masked(display, pipe, pipe_mask) intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), - display->irq.de_irq_mask[pipe], - ~display->irq.de_irq_mask[pipe] | extra_ier); + display->irq.de_pipe_imr_mask[pipe], + ~display->irq.de_pipe_imr_mask[pipe] | extra_ier); spin_unlock_irq(&display->irq.lock); } @@ -2183,8 +2262,6 @@ out: void ilk_de_irq_postinstall(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - u32 display_mask, extra_mask; if (DISPLAY_VER(display) >= 7) { @@ -2216,11 +2293,11 @@ void ilk_de_irq_postinstall(struct intel_display *display) if (display->platform.ironlake && display->platform.mobile) extra_mask |= DE_PCU_EVENT; - i915->irq_mask = ~display_mask; + display->irq.ilk_de_imr_mask = ~display_mask; ibx_irq_postinstall(display); - intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask, + intel_display_irq_regs_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask, display_mask | extra_mask); } @@ -2305,12 +2382,12 @@ void gen8_de_irq_postinstall(struct intel_display *display) } for_each_pipe(display, pipe) { - display->irq.de_irq_mask[pipe] = ~de_pipe_masked; + display->irq.de_pipe_imr_mask[pipe] = ~de_pipe_masked; if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE(pipe))) intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), - display->irq.de_irq_mask[pipe], + display->irq.de_pipe_imr_mask[pipe], de_pipe_enables); } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h index c66db3851da4..84acd31948cf 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.h +++ b/drivers/gpu/drm/i915/display/intel_display_irq.h @@ -47,8 +47,9 @@ void i965_disable_vblank(struct drm_crtc *crtc); void ilk_disable_vblank(struct drm_crtc *crtc); void bdw_disable_vblank(struct drm_crtc *crtc); -void ivb_display_irq_handler(struct intel_display *display, u32 de_iir); -void ilk_display_irq_handler(struct intel_display *display, u32 de_iir); +void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier); +void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier); +bool ilk_display_irq_handler(struct intel_display *display); void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl); void gen11_display_irq_handler(struct intel_display *display); @@ -56,11 +57,12 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl); void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir); void i9xx_display_irq_reset(struct intel_display *display); -void ibx_display_irq_reset(struct intel_display *display); +void ilk_display_irq_reset(struct intel_display *display); void vlv_display_irq_reset(struct intel_display *display); void gen8_display_irq_reset(struct intel_display *display); void gen11_display_irq_reset(struct intel_display *display); +u32 i9xx_display_irq_enable_mask(struct intel_display *display); void i915_display_irq_postinstall(struct intel_display *display); void i965_display_irq_postinstall(struct intel_display *display); void 
vlv_display_irq_postinstall(struct intel_display *display); diff --git a/drivers/gpu/drm/i915/display/intel_display_jiffies.h b/drivers/gpu/drm/i915/display/intel_display_jiffies.h new file mode 100644 index 000000000000..c060c567e262 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_jiffies.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __INTEL_DISPLAY_JIFFIES_H__ +#define __INTEL_DISPLAY_JIFFIES_H__ + +#include <linux/jiffies.h> + +static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) +{ + unsigned long j = msecs_to_jiffies(m); + + return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); +} + +/* + * If you need to wait X milliseconds between events A and B, but event B + * doesn't happen exactly after event A, you record the timestamp (jiffies) of + * when event A happened, then just before event B you call this function and + * pass the timestamp as the first argument, and X as the second argument. + */ +static inline void +wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) +{ + unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; + + /* + * Don't re-read the value of "jiffies" every time since it may change + * behind our back and break the math. + */ + tmp_jiffies = jiffies; + target_jiffies = timestamp_jiffies + + msecs_to_jiffies_timeout(to_wait_ms); + + if (time_after(target_jiffies, tmp_jiffies)) { + remaining_jiffies = target_jiffies - tmp_jiffies; + while (remaining_jiffies) + remaining_jiffies = + schedule_timeout_uninterruptible(remaining_jiffies); + } +} + +#endif /* __INTEL_DISPLAY_JIFFIES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index da4babfd6bcb..2a4cc1dcc293 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -6,12 +6,13 @@ #include <linux/iopoll.h> #include <linux/string_helpers.h> +#include <drm/drm_print.h> + #include "soc/intel_dram.h" #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" -#include "i915_utils.h" #include "intel_backlight_regs.h" #include "intel_cdclk.h" #include "intel_clock_gating.h" @@ -23,6 +24,7 @@ #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dmc.h" #include "intel_mchbar_regs.h" #include "intel_pch_refclk.h" @@ -1290,9 +1292,8 @@ static void hsw_disable_lcpll(struct intel_display *display, val |= LCPLL_CD_SOURCE_FCLK; intel_de_write(display, LCPLL_CTL, val); - ret = intel_de_wait_custom(display, LCPLL_CTL, - LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE, - 1, 0, NULL); + ret = intel_de_wait_for_set_us(display, LCPLL_CTL, + LCPLL_CD_SOURCE_FCLK_DONE, 1); if (ret) drm_err(display->drm, "Switching to FCLK failed\n"); @@ -1303,7 +1304,7 @@ static void hsw_disable_lcpll(struct intel_display *display, intel_de_write(display, LCPLL_CTL, val); intel_de_posting_read(display, LCPLL_CTL); - if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) + if (intel_de_wait_for_clear_ms(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) drm_err(display->drm, "LCPLL still locked\n"); val = hsw_read_dcomp(display); @@ -1360,15 +1361,14 @@ static void hsw_restore_lcpll(struct intel_display *display) val &= ~LCPLL_PLL_DISABLE; intel_de_write(display, LCPLL_CTL, val); - if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) + if 
(intel_de_wait_for_set_ms(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) drm_err(display->drm, "LCPLL not locked yet\n"); if (val & LCPLL_CD_SOURCE_FCLK) { intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); - ret = intel_de_wait_custom(display, LCPLL_CTL, - LCPLL_CD_SOURCE_FCLK_DONE, 0, - 1, 0, NULL); + ret = intel_de_wait_for_clear_us(display, LCPLL_CTL, + LCPLL_CD_SOURCE_FCLK_DONE, 1); if (ret) drm_err(display->drm, "Switching back to LCPLL failed\n"); @@ -1436,6 +1436,9 @@ static void intel_pch_reset_handshake(struct intel_display *display, i915_reg_t reg; u32 reset_bits; + if (DISPLAY_VER(display) >= 35) + return; + if (display->platform.ivybridge) { reg = GEN7_MSG_CTL; reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 39b71fffa2cd..9b49952994ce 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1516,7 +1516,11 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = { .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_D), .has_fuses = true, - }, { + }, +}; + +static const struct i915_power_well_desc xelpdp_power_wells_aux[] = { + { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), @@ -1534,6 +1538,7 @@ static const struct i915_power_well_desc_list xelpdp_power_wells[] = { I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off), I915_PW_DESCRIPTORS(xelpdp_power_wells_main), + I915_PW_DESCRIPTORS(xelpdp_power_wells_aux), }; I915_DECL_PW_DOMAINS(xe2lpd_pwdoms_pica_tc, @@ -1584,6 +1589,7 @@ static const struct i915_power_well_desc_list xe2lpd_power_wells[] = { I915_PW_DESCRIPTORS(xe2lpd_power_wells_dcoff), I915_PW_DESCRIPTORS(xelpdp_power_wells_main), I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), + I915_PW_DESCRIPTORS(xelpdp_power_wells_aux), }; /* @@ -1677,16 +1683,6 @@ static const struct i915_power_well_desc xe3lpd_power_wells_main[] = { .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_D), .has_fuses = true, - }, { - .instances = &I915_PW_INSTANCES( - I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), - I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), - I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1), - I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2), - I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3), - I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4), - ), - .ops = &xelpdp_aux_power_well_ops, }, }; @@ -1715,6 +1711,7 @@ static const struct i915_power_well_desc_list xe3lpd_power_wells[] = { I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff), I915_PW_DESCRIPTORS(xe3lpd_power_wells_main), I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), + I915_PW_DESCRIPTORS(xelpdp_power_wells_aux), }; static const struct i915_power_well_desc wcl_power_wells_main[] = { @@ -1751,7 +1748,11 @@ static const struct i915_power_well_desc wcl_power_wells_main[] = { .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), .has_fuses = true, - }, { + }, +}; + +static const struct i915_power_well_desc wcl_power_wells_aux[] = { + { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), @@ -1768,6 +1769,7 
@@ static const struct i915_power_well_desc_list wcl_power_wells[] = { I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff), I915_PW_DESCRIPTORS(wcl_power_wells_main), I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), + I915_PW_DESCRIPTORS(wcl_power_wells_aux), }; static void init_power_well_domains(const struct i915_power_well_instance *inst, diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 5e88b930f5aa..f4f7e73acc87 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -5,6 +5,8 @@ #include <linux/iopoll.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" @@ -291,8 +293,8 @@ static void hsw_wait_for_power_well_enable(struct intel_display *display, } /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ - if (intel_de_wait_for_set(display, regs->driver, - HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) { + if (intel_de_wait_for_set_ms(display, regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) { drm_dbg_kms(display->drm, "%s power well enable timeout\n", intel_power_well_name(power_well)); @@ -336,9 +338,9 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display, */ reqs = hsw_power_well_requesters(display, regs, pw_idx); - ret = intel_de_wait_for_clear(display, regs->driver, - HSW_PWR_WELL_CTL_STATE(pw_idx), - reqs ? 0 : 1); + ret = intel_de_wait_for_clear_ms(display, regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), + reqs ? 0 : 1); if (!ret) return; @@ -357,8 +359,8 @@ static void gen9_wait_for_power_well_fuses(struct intel_display *display, { /* Timeout 5us for PG#0, for other PGs 1us */ drm_WARN_ON(display->drm, - intel_de_wait_for_set(display, SKL_FUSE_STATUS, - SKL_FUSE_PG_DIST_STATUS(pg), 1)); + intel_de_wait_for_set_ms(display, SKL_FUSE_STATUS, + SKL_FUSE_PG_DIST_STATUS(pg), 1)); } static void hsw_power_well_enable(struct intel_display *display, @@ -1356,6 +1358,7 @@ static void assert_chv_phy_status(struct intel_display *display) u32 phy_control = display->power.chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; + u32 val; /* * The BIOS can leave the PHY is some weird state @@ -1443,12 +1446,11 @@ static void assert_chv_phy_status(struct intel_display *display) * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change. */ - if (intel_de_wait(display, DISPLAY_PHY_STATUS, - phy_status_mask, phy_status, 10)) + if (intel_de_wait_ms(display, DISPLAY_PHY_STATUS, + phy_status_mask, phy_status, 10, &val)) drm_err(display->drm, "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", - intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask, - phy_status, display->power.chv_phy_control); + val & phy_status_mask, phy_status, display->power.chv_phy_control); } #undef BITS_SET @@ -1474,8 +1476,8 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display, vlv_set_power_well(display, power_well, true); /* Poll for phypwrgood signal */ - if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS, - PHY_POWERGOOD(phy), 1)) + if (intel_de_wait_for_set_ms(display, DISPLAY_PHY_STATUS, + PHY_POWERGOOD(phy), 1)) drm_err(display->drm, "Display PHY %d is not power up\n", phy); @@ -1864,18 +1866,36 @@ static void xelpdp_aux_power_well_enable(struct intel_display *display, * expected to just wait a fixed 600us after raising the request * bit. 
*/ - usleep_range(600, 1200); + if (DISPLAY_VER(display) >= 35) { + if (intel_de_wait_for_set_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch), + XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2)) + drm_warn(display->drm, + "Timeout waiting for PHY %c AUX channel power to be up\n", + phy_name(phy)); + } else { + usleep_range(600, 1200); + } } static void xelpdp_aux_power_well_disable(struct intel_display *display, struct i915_power_well *power_well) { enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; + enum phy phy = icl_aux_pw_to_phy(display, power_well); intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch), XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 0); - usleep_range(10, 30); + + if (DISPLAY_VER(display) >= 35) { + if (intel_de_wait_for_clear_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch), + XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1)) + drm_warn(display->drm, + "Timeout waiting for PHY %c AUX channel to powerdown\n", + phy_name(phy)); + } else { + usleep_range(10, 30); + } } static bool xelpdp_aux_power_well_enabled(struct intel_display *display, @@ -1893,8 +1913,8 @@ static void xe2lpd_pica_power_well_enable(struct intel_display *display, intel_de_write(display, XE2LPD_PICA_PW_CTL, XE2LPD_PICA_CTL_POWER_REQUEST); - if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL, - XE2LPD_PICA_CTL_POWER_STATUS, 1)) { + if (intel_de_wait_for_set_ms(display, XE2LPD_PICA_PW_CTL, + XE2LPD_PICA_CTL_POWER_STATUS, 1)) { drm_dbg_kms(display->drm, "pica power well enable timeout\n"); drm_WARN(display->drm, 1, "Power well PICA timeout when enabled"); @@ -1906,8 +1926,8 @@ static void xe2lpd_pica_power_well_disable(struct intel_display *display, { intel_de_write(display, XE2LPD_PICA_PW_CTL, 0); - if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL, - XE2LPD_PICA_CTL_POWER_STATUS, 1)) { + if (intel_de_wait_for_clear_ms(display, XE2LPD_PICA_PW_CTL, + XE2LPD_PICA_CTL_POWER_STATUS, 1)) { drm_dbg_kms(display->drm, "pica power well disable timeout\n"); drm_WARN(display->drm, 1, "Power well PICA timeout when disabled"); diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c index f5f38dca14d7..03e8c68d2913 100644 --- a/drivers/gpu/drm/i915/display/intel_display_reset.c +++ b/drivers/gpu/drm/i915/display/intel_display_reset.c @@ -4,6 +4,7 @@ */ #include <drm/drm_atomic_helper.h> +#include <drm/drm_print.h> #include "i915_drv.h" #include "intel_clock_gating.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c index 56c4024201c1..0a331f89b4db 100644 --- a/drivers/gpu/drm/i915/display/intel_display_rpm.c +++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c @@ -1,69 +1,62 @@ // SPDX-License-Identifier: MIT /* Copyright © 2025 Intel Corporation */ -#include "i915_drv.h" +#include <drm/intel/display_parent_interface.h> + #include "intel_display_core.h" #include "intel_display_rpm.h" -#include "intel_runtime_pm.h" - -static struct intel_runtime_pm *display_to_rpm(struct intel_display *display) -{ - struct drm_i915_private *i915 = to_i915(display->drm); - - return &i915->runtime_pm; -} struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display) { - return intel_runtime_pm_get_raw(display_to_rpm(display)); + return display->parent->rpm->get_raw(display->drm); } void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref) { - intel_runtime_pm_put_raw(display_to_rpm(display), wakeref); + display->parent->rpm->put_raw(display->drm, wakeref); 
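
The intel_display_rpm.c conversion above removes the display core's direct dependency on i915's runtime-PM implementation: every wrapper now calls through display->parent->rpm, an ops table the parent driver (i915 or xe) hands over via intel_display_device_probe(). The real layout of struct intel_display_parent_interface lives in <drm/intel/display_parent_interface.h>, which is not shown in this diff, so the sketch below is illustrative only; apart from the get/put call shapes visible above, every name in it is invented. It demonstrates the underlying pattern of a child component calling back into its parent through a parent-supplied function-pointer table.

/* Illustrative pattern only -- not the kernel's intel_display_parent_interface. */
#include <stdio.h>

struct device { const char *name; };

/* Hypothetical ops table the parent fills in. */
struct parent_rpm_ops {
	int  (*get)(struct device *dev);
	void (*put)(struct device *dev, int wakeref);
};

struct parent_interface {
	const struct parent_rpm_ops *rpm;
};

struct display {
	struct device *dev;
	const struct parent_interface *parent;
};

/* Parent-side implementation. */
static int parent_rpm_get(struct device *dev)
{
	printf("%s: runtime resume\n", dev->name);
	return 1; /* pretend wakeref */
}

static void parent_rpm_put(struct device *dev, int wakeref)
{
	printf("%s: runtime suspend (wakeref %d)\n", dev->name, wakeref);
}

static const struct parent_rpm_ops rpm_ops = {
	.get = parent_rpm_get,
	.put = parent_rpm_put,
};

static const struct parent_interface parent = { .rpm = &rpm_ops };

/* Child-side wrappers, mirroring the intel_display_rpm_get()/put() shape above. */
static int display_rpm_get(struct display *d)
{
	return d->parent->rpm->get(d->dev);
}

static void display_rpm_put(struct display *d, int wakeref)
{
	d->parent->rpm->put(d->dev, wakeref);
}

int main(void)
{
	struct device dev = { .name = "pci-0000:00:02.0" };
	struct display disp = { .dev = &dev, .parent = &parent };
	int wakeref = display_rpm_get(&disp);

	display_rpm_put(&disp, wakeref);
	return 0;
}

The payoff is the same as in the kernel change: the child compiles against only the interface header, so either parent driver can supply the implementation without the display core pulling in i915_drv.h.
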
} struct ref_tracker *intel_display_rpm_get(struct intel_display *display) { - return intel_runtime_pm_get(display_to_rpm(display)); + return display->parent->rpm->get(display->drm); } struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display) { - return intel_runtime_pm_get_if_in_use(display_to_rpm(display)); + return display->parent->rpm->get_if_in_use(display->drm); } struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display) { - return intel_runtime_pm_get_noresume(display_to_rpm(display)); + return display->parent->rpm->get_noresume(display->drm); } void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref) { - intel_runtime_pm_put(display_to_rpm(display), wakeref); + display->parent->rpm->put(display->drm, wakeref); } void intel_display_rpm_put_unchecked(struct intel_display *display) { - intel_runtime_pm_put_unchecked(display_to_rpm(display)); + display->parent->rpm->put_unchecked(display->drm); } bool intel_display_rpm_suspended(struct intel_display *display) { - return intel_runtime_pm_suspended(display_to_rpm(display)); + return display->parent->rpm->suspended(display->drm); } void assert_display_rpm_held(struct intel_display *display) { - assert_rpm_wakelock_held(display_to_rpm(display)); + display->parent->rpm->assert_held(display->drm); } void intel_display_rpm_assert_block(struct intel_display *display) { - disable_rpm_wakeref_asserts(display_to_rpm(display)); + display->parent->rpm->assert_block(display->drm); } void intel_display_rpm_assert_unblock(struct intel_display *display) { - enable_rpm_wakeref_asserts(display_to_rpm(display)); + display->parent->rpm->assert_unblock(display->drm); } diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 358ab922d7a7..38702a9e0f50 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -551,7 +551,16 @@ struct intel_connector { u8 fec_capability; u8 dsc_hblank_expansion_quirk:1; + u8 dsc_throughput_quirk:1; u8 dsc_decompression_enabled:1; + + struct { + struct { + int rgb_yuv444; + int yuv422_420; + } overall_throughput; + int max_line_width; + } dsc_branch_caps; } dp; struct { @@ -717,7 +726,6 @@ struct intel_initial_plane_config { struct intel_memory_region *mem; resource_size_t phys_base; struct i915_vma *vma; - unsigned int tiling; int size; u32 base; u8 rotation; @@ -946,6 +954,26 @@ struct intel_csc_matrix { u16 postoff[3]; }; +enum intel_panel_replay_dsc_support { + INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED, + INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY, + INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE, +}; + +struct scaler_filter_coeff { + u16 sign; + u16 exp; + u16 mantissa; +}; + +struct intel_casf { + #define SCALER_FILTER_NUM_TAPS 7 + struct scaler_filter_coeff coeff[SCALER_FILTER_NUM_TAPS]; + u8 strength; + u8 win_size; + bool casf_enable; +}; + struct intel_crtc_state { /* * uapi (drm) state. This is the software state shown to userspace. @@ -982,6 +1010,7 @@ struct intel_crtc_state { struct drm_property_blob *degamma_lut, *gamma_lut, *ctm; struct drm_display_mode mode, pipe_mode, adjusted_mode; enum drm_scaling_filter scaling_filter; + struct intel_casf casf_params; } hw; /* actual state of LUTs */ @@ -1124,9 +1153,12 @@ struct intel_crtc_state { bool has_panel_replay; bool wm_level_disabled; bool pkg_c_latency_used; + /* Only used for state verification. 
*/ + enum intel_panel_replay_dsc_support panel_replay_dsc_support; u32 dc3co_exitline; u16 su_y_granularity; u8 active_non_psr_pipes; + const char *no_psr_reason; /* * Frequency the dpll for the port should run at. Differs from the @@ -1183,7 +1215,9 @@ struct intel_crtc_state { struct intel_crtc_wm_state wm; - int min_cdclk[I915_MAX_PLANES]; + int min_cdclk; + + int plane_min_cdclk[I915_MAX_PLANES]; /* for packed/planar CbCr */ u32 data_rate[I915_MAX_PLANES]; @@ -1268,6 +1302,8 @@ struct intel_crtc_state { /* Display Stream compression state */ struct { + /* Only used for state computation, not read out from the HW. */ + bool compression_enabled_on_link; bool compression_enable; int num_streams; /* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */ @@ -1341,6 +1377,20 @@ struct intel_crtc_state { /* LOBF flag */ bool has_lobf; + + /* W2 window or 'set context latency' lines */ + u16 set_context_latency; + + struct { + u8 io_wake_lines; + u8 fast_wake_lines; + + /* LNL and beyond */ + u8 check_entry_lines; + u8 aux_less_wake_lines; + u8 silence_period_sym_clocks; + u8 lfps_half_cycle_num_of_syms; + } alpm_state; }; enum intel_pipe_crc_source { @@ -1513,8 +1563,8 @@ struct intel_plane { const struct drm_framebuffer *fb, int color_plane); unsigned int (*max_stride)(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation); + const struct drm_format_info *info, + u64 modifier, unsigned int rotation); bool (*can_async_flip)(u64 modifier); /* Write all non-self arming plane registers */ void (*update_noarm)(struct intel_dsb *dsb, @@ -1679,16 +1729,22 @@ struct intel_psr { bool source_panel_replay_support; bool sink_panel_replay_support; bool sink_panel_replay_su_support; + enum intel_panel_replay_dsc_support sink_panel_replay_dsc_support; bool panel_replay_enabled; u32 dc3co_exitline; u32 dc3co_exit_delay; struct delayed_work dc3co_work; u8 entry_setup_frames; + u8 io_wake_lines; + u8 fast_wake_lines; + bool link_ok; bool pkg_c_latency_used; u8 active_non_psr_pipes; + + const char *no_psr_reason; }; struct intel_dp { @@ -1844,19 +1900,12 @@ struct intel_dp { bool colorimetry_support; struct { - u8 io_wake_lines; - u8 fast_wake_lines; enum transcoder transcoder; struct mutex lock; - /* LNL and beyond */ - u8 check_entry_lines; - u8 aux_less_wake_lines; - u8 silence_period_sym_clocks; - u8 lfps_half_cycle_num_of_syms; bool lobf_disable_debug; bool sink_alpm_error; - } alpm_parameters; + } alpm; u8 alpm_dpcd; diff --git a/drivers/gpu/drm/i915/display/intel_display_utils.c b/drivers/gpu/drm/i915/display/intel_display_utils.c new file mode 100644 index 000000000000..04d010f7c23e --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_utils.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include <linux/device.h> + +#include <drm/drm_device.h> + +#ifdef CONFIG_X86 +#include <asm/hypervisor.h> +#endif + +#include "intel_display_core.h" +#include "intel_display_utils.h" + +bool intel_display_run_as_guest(struct intel_display *display) +{ +#if IS_ENABLED(CONFIG_X86) + return !hypervisor_is_type(X86_HYPER_NATIVE); +#else + /* Not supported yet */ + return false; +#endif +} + +bool intel_display_vtd_active(struct intel_display *display) +{ + if (device_iommu_mapped(display->drm->dev)) + return true; + + /* Running as a guest, we assume the host is enforcing VT'd */ + return intel_display_run_as_guest(display); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_utils.h 
b/drivers/gpu/drm/i915/display/intel_display_utils.h new file mode 100644 index 000000000000..2a18f160320c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_utils.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __INTEL_DISPLAY_UTILS__ +#define __INTEL_DISPLAY_UTILS__ + +#include <linux/bug.h> +#include <linux/types.h> + +struct intel_display; + +#ifndef MISSING_CASE +#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ + __stringify(x), (long)(x)) +#endif + +#ifndef fetch_and_zero +#define fetch_and_zero(ptr) ({ \ + typeof(*ptr) __T = *(ptr); \ + *(ptr) = (typeof(*ptr))0; \ + __T; \ +}) +#endif + +#define KHz(x) (1000 * (x)) +#define MHz(x) KHz(1000 * (x)) + +bool intel_display_run_as_guest(struct intel_display *display); +bool intel_display_vtd_active(struct intel_display *display); + +#endif /* __INTEL_DISPLAY_UTILS__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c index 31cd2c9cd488..e38e5e87877c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.c +++ b/drivers/gpu/drm/i915/display/intel_display_wa.c @@ -49,7 +49,8 @@ void intel_display_wa_apply(struct intel_display *display) */ static bool intel_display_needs_wa_16025573575(struct intel_display *display) { - return DISPLAY_VERx100(display) == 3000 || DISPLAY_VERx100(display) == 3002; + return DISPLAY_VERx100(display) == 3000 || DISPLAY_VERx100(display) == 3002 || + DISPLAY_VERx100(display) == 3500; } /* @@ -67,6 +68,8 @@ bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, return intel_display_needs_wa_16025573575(display); case INTEL_DISPLAY_WA_14011503117: return DISPLAY_VER(display) == 13; + case INTEL_DISPLAY_WA_22014263786: + return IS_DISPLAY_VERx100(display, 1100, 1400); default: drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name); break; diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h index abc1df83f066..3644e8e2b724 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.h +++ b/drivers/gpu/drm/i915/display/intel_display_wa.h @@ -25,6 +25,7 @@ enum intel_display_wa { INTEL_DISPLAY_WA_16023588340, INTEL_DISPLAY_WA_16025573575, INTEL_DISPLAY_WA_14011503117, + INTEL_DISPLAY_WA_22014263786, }; bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 4a4cace1f879..6ebbd97e6351 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -30,13 +30,13 @@ #include <drm/drm_print.h> #include "i915_reg.h" -#include "i915_utils.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_power_well.h" #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dmc.h" #include "intel_dmc_regs.h" #include "intel_flipq.h" @@ -127,6 +127,12 @@ static bool dmc_firmware_param_disabled(struct intel_display *display) #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define XE3P_LPD_DMC_PATH DMC_PATH(xe3p_lpd) +MODULE_FIRMWARE(XE3P_LPD_DMC_PATH); + +#define XE3LPD_3002_DMC_PATH DMC_PATH(xe3lpd_3002) +MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH); + #define XE3LPD_DMC_PATH DMC_PATH(xe3lpd) MODULE_FIRMWARE(XE3LPD_DMC_PATH); @@ -184,8 +190,13 @@ static const char 
*dmc_firmware_default(struct intel_display *display, u32 *size const char *fw_path = NULL; u32 max_fw_size = 0; - if (DISPLAY_VERx100(display) == 3002 || - DISPLAY_VERx100(display) == 3000) { + if (DISPLAY_VERx100(display) == 3500) { + fw_path = XE3P_LPD_DMC_PATH; + max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; + } else if (DISPLAY_VERx100(display) == 3002) { + fw_path = XE3LPD_3002_DMC_PATH; + max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; + } else if (DISPLAY_VERx100(display) == 3000) { fw_path = XE3LPD_DMC_PATH; max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; } else if (DISPLAY_VERx100(display) == 2000) { @@ -509,10 +520,16 @@ static u32 pipedmc_interrupt_mask(struct intel_display *display) PIPEDMC_ATS_FAULT; } -static u32 dmc_evt_ctl_disable(void) +static u32 dmc_evt_ctl_disable(u32 dmc_evt_ctl) { - return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, - DMC_EVT_CTL_TYPE_EDGE_0_1) | + /* + * DMC_EVT_CTL_ENABLE cannot be cleared once set. Always + * configure it based on the original event definition to + * avoid mismatches in assert_dmc_loaded(). + */ + return (dmc_evt_ctl & DMC_EVT_CTL_ENABLE) | + REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, + DMC_EVT_CTL_TYPE_EDGE_0_1) | REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, DMC_EVENT_FALSE); } @@ -573,6 +590,21 @@ static bool fixup_dmc_evt(struct intel_display *display, return true; } + /* + * TGL/ADL-S DMC firmware incorrectly uses the undelayed vblank + * event for the HRR handler, when it should be using the delayed + * vblank event instead. Fixed firmware was never released + * so the Windows driver just hacks around it by overriding + * the event ID. Do the same. + */ + if ((display->platform.tigerlake || display->platform.alderlake_s) && + is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) { + *data_ctl &= ~DMC_EVT_CTL_EVENT_ID_MASK; + *data_ctl |= REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, + MAINDMC_EVENT_VBLANK_DELAYED_A); + return true; + } + return false; } @@ -594,7 +626,7 @@ static bool disable_dmc_evt(struct intel_display *display, /* also disable the HRR event on the main DMC on TGL/ADLS */ if ((display->platform.tigerlake || display->platform.alderlake_s) && - is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data)) + is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_DELAYED_A, reg, data)) return true; return false; @@ -607,7 +639,7 @@ static u32 dmc_mmiodata(struct intel_display *display, if (disable_dmc_evt(display, dmc_id, dmc->dmc_info[dmc_id].mmioaddr[i], dmc->dmc_info[dmc_id].mmiodata[i])) - return dmc_evt_ctl_disable(); + return dmc_evt_ctl_disable(dmc->dmc_info[dmc_id].mmiodata[i]); else return dmc->dmc_info[dmc_id].mmiodata[i]; } @@ -666,12 +698,6 @@ static void assert_dmc_loaded(struct intel_display *display, found = intel_de_read(display, reg); expected = dmc_mmiodata(display, dmc, dmc_id, i); - /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */ - if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) { - found &= ~DMC_EVT_CTL_ENABLE; - expected &= ~DMC_EVT_CTL_ENABLE; - } - drm_WARN(display->drm, found != expected, "DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n", dmc_id, i, i915_mmio_reg_offset(reg), expected, found); @@ -692,11 +718,11 @@ static bool need_pipedmc_load_program(struct intel_display *display) static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe) { /* - * PTL: + * Xe3_LPD/Xe3p_LPD: * - pipe A/B DMC doesn't need save/restore * - pipe C/D DMC is in PG0, needs manual save/restore */ - if (DISPLAY_VER(display) == 30) + if (IS_DISPLAY_VER(display, 30, 35)) return pipe 
>= PIPE_C; /* @@ -824,7 +850,7 @@ static void dmc_configure_event(struct intel_display *display, if (!is_event_handler(display, dmc_id, event_id, reg, data)) continue; - intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable()); + intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable(data)); num_handlers++; } @@ -1194,7 +1220,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, } num_entries = package_header->num_entries; - if (WARN_ON(package_header->num_entries > max_entries)) + if (WARN_ON(num_entries > max_entries)) num_entries = max_entries; fw_info = (const struct intel_fw_info *) @@ -1693,14 +1719,14 @@ void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe) drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC GTT fault\n", crtc->base.base.id, crtc->base.name); if (tmp & PIPEDMC_ERROR) - drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC error\n", + drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC error\n", crtc->base.base.id, crtc->base.name); } int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK; if (tmp == 0 && int_vector != 0) - drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC interrupt vector 0x%x\n", - crtc->base.base.id, crtc->base.name, tmp); + drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC interrupt vector 0x%x\n", + crtc->base.base.id, crtc->base.name, int_vector); } void intel_pipedmc_enable_event(struct intel_crtc *crtc, diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c index b3bb89ba34f9..73a3101514f3 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c +++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c @@ -179,11 +179,11 @@ static void intel_dmc_wl_work(struct work_struct *work) if (refcount_read(&wl->refcount)) goto out_unlock; - __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0); + intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0); - if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL, - DMC_WAKELOCK_CTL_ACK, 0, - DMC_WAKELOCK_CTL_TIMEOUT_US)) { + if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL, + DMC_WAKELOCK_CTL_ACK, 0, + DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) { WARN_RATELIMIT(1, "DMC wakelock release timed out"); goto out_unlock; } @@ -207,17 +207,16 @@ static void __intel_dmc_wl_take(struct intel_display *display) if (wl->taken) return; - __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0, - DMC_WAKELOCK_CTL_REQ); + intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, 0, DMC_WAKELOCK_CTL_REQ); /* * We need to use the atomic variant of the waiting routine * because the DMC wakelock is also taken in atomic context. */ - if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL, - DMC_WAKELOCK_CTL_ACK, - DMC_WAKELOCK_CTL_ACK, - DMC_WAKELOCK_CTL_TIMEOUT_US)) { + if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL, + DMC_WAKELOCK_CTL_ACK, + DMC_WAKELOCK_CTL_ACK, + DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) { WARN_RATELIMIT(1, "DMC wakelock ack timed out"); return; } @@ -360,7 +359,7 @@ void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state) * wakelock, because we're just enabling it, so call the * non-locking version directly here. 
*/ - __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE); + intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE); wl->enabled = true; @@ -402,7 +401,7 @@ void intel_dmc_wl_disable(struct intel_display *display) goto out_unlock; /* Disable wakelock in DMC */ - __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0); + intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0); wl->enabled = false; @@ -414,7 +413,7 @@ void intel_dmc_wl_disable(struct intel_display *display) * * TODO: Get the correct expectation from the hardware team. */ - __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0); + intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0); wl->taken = false; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 2eab591a8ef5..0ec82fcbcf48 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -51,7 +51,6 @@ #include <drm/drm_probe_helper.h> #include "g4x_dp.h" -#include "i915_utils.h" #include "intel_alpm.h" #include "intel_atomic.h" #include "intel_audio.h" @@ -64,6 +63,8 @@ #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_driver.h" +#include "intel_display_jiffies.h" +#include "intel_display_utils.h" #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_types.h" @@ -93,14 +94,10 @@ #include "intel_psr.h" #include "intel_quirks.h" #include "intel_tc.h" +#include "intel_vblank.h" #include "intel_vdsc.h" #include "intel_vrr.h" -/* DP DSC throughput values used for slice count calculations KPixels/s */ -#define DP_DSC_PEAK_PIXEL_RATE 2720000 -#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 -#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 - /* Max DSC line buffer depth supported by HW. */ #define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13 @@ -1018,13 +1015,43 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, struct intel_display *display = to_intel_display(connector); u8 min_slice_count, i; int max_slice_width; + int tp_rgb_yuv444; + int tp_yuv422_420; - if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) - min_slice_count = DIV_ROUND_UP(mode_clock, - DP_DSC_MAX_ENC_THROUGHPUT_0); - else - min_slice_count = DIV_ROUND_UP(mode_clock, - DP_DSC_MAX_ENC_THROUGHPUT_1); + /* + * TODO: Use the throughput value specific to the actual RGB/YUV + * format of the output. + * The RGB/YUV444 throughput value should be always either equal + * or smaller than the YUV422/420 value, but let's not depend on + * this assumption. + */ + if (mode_clock > max(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444, + connector->dp.dsc_branch_caps.overall_throughput.yuv422_420)) + return 0; + + if (mode_hdisplay > connector->dp.dsc_branch_caps.max_line_width) + return 0; + + /* + * TODO: Pass the total pixel rate of all the streams transferred to + * an MST tiled display, calculate the total slice count for all tiles + * from this and the per-tile slice count from the total slice count. + */ + tp_rgb_yuv444 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd, + mode_clock, true); + tp_yuv422_420 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd, + mode_clock, false); + + /* + * TODO: Use the throughput value specific to the actual RGB/YUV + * format of the output. + * For now use the smaller of these, which is ok, potentially + * resulting in a higher than required minimum slice count. 
+ * The RGB/YUV444 throughput value should be always either equal + * or smaller than the YUV422/420 value, but let's not depend on + * this assumption. + */ + min_slice_count = DIV_ROUND_UP(mode_clock, min(tp_rgb_yuv444, tp_yuv422_420)); /* * Due to some DSC engine BW limitations, we need to enable second @@ -2340,24 +2367,26 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp, return 0; } -static void intel_dp_fec_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state) +/* + * Return whether FEC must be enabled for 8b10b SST or MST links. On 128b132b + * links FEC is always enabled implicitly by the HW, so this function returns + * false for that case. + */ +bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state, + bool dsc_enabled_on_crtc) { - if (crtc_state->fec_enable) - return; + if (intel_dp_is_uhbr(crtc_state)) + return false; /* * Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional. * Since, FEC is a bandwidth overhead, continue to not enable it for * eDP. Until, there is a good reason to do so. */ - if (intel_dp_is_edp(intel_dp)) - return; - - if (intel_dp_is_uhbr(crtc_state)) - return; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return false; - crtc_state->fec_enable = true; + return dsc_enabled_on_crtc || intel_dsc_enabled_on_link(crtc_state); } int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, @@ -2375,7 +2404,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, bool is_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST); int ret; - intel_dp_fec_compute_config(intel_dp, pipe_config); + /* + * FIXME: set the FEC enabled state once pipe_config->port_clock is + * already known, so the UHBR/non-UHBR mode can be determined. + */ + pipe_config->fec_enable = intel_dp_needs_8b10b_fec(pipe_config, true); if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format)) return -EINVAL; @@ -2450,7 +2483,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, return ret; } - pipe_config->dsc.compression_enable = true; + intel_dsc_enable_on_crtc(pipe_config); + drm_dbg_kms(display->drm, "DP DSC computed with Input Bpp = %d " "Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n", pipe_config->pipe_bpp, @@ -2460,6 +2494,40 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, return 0; } +static int +dsc_throughput_quirk_max_bpp_x16(const struct intel_connector *connector, + const struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (!connector->dp.dsc_throughput_quirk) + return INT_MAX; + + /* + * Synaptics Panamera branch devices have a problem decompressing a + * stream with a compressed link-bpp higher than 12, if the pixel + * clock is higher than ~50 % of the maximum overall throughput + * reported by the branch device. Work around this by limiting the + * maximum link bpp for such pixel clocks. + * + * TODO: Use the throughput value specific to the actual RGB/YUV + * format of the output, after determining the pixel clock limit for + * YUV modes. For now use the smaller of the throughput values, which + * may result in limiting the link-bpp value already at a lower than + * required mode clock in case of native YUV422/420 output formats. + * The RGB/YUV444 throughput value should be always either equal or + * smaller than the YUV422/420 value, but let's not depend on this + * assumption. 
+ */ + if (adjusted_mode->crtc_clock < + min(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444, + connector->dp.dsc_branch_caps.overall_throughput.yuv422_420) / 2) + return INT_MAX; + + return fxp_q4_from_int(12); +} + /* * Calculate the output link min, max bpp values in limits based on the pipe bpp * range, crtc_state and dsc mode. Return true on success. @@ -2491,6 +2559,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, } else { int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp; int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp; + int throughput_max_bpp_x16; dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp(); dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state); @@ -2505,6 +2574,19 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp)); + + throughput_max_bpp_x16 = dsc_throughput_quirk_max_bpp_x16(connector, crtc_state); + throughput_max_bpp_x16 = clamp(throughput_max_bpp_x16, + limits->link.min_bpp_x16, max_link_bpp_x16); + if (throughput_max_bpp_x16 < max_link_bpp_x16) { + max_link_bpp_x16 = throughput_max_bpp_x16; + + drm_dbg_kms(display->drm, + "[CRTC:%d:%s][CONNECTOR:%d:%s] Decreasing link max bpp to " FXP_Q4_FMT " due to DSC throughput quirk\n", + crtc->base.base.id, crtc->base.name, + connector->base.base.id, connector->base.name, + FXP_Q4_ARGS(max_link_bpp_x16)); + } } limits->link.max_bpp_x16 = max_link_bpp_x16; @@ -4169,7 +4251,36 @@ static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux, dsc_dpcd); } -void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector) +static void init_dsc_overall_throughput_limits(struct intel_connector *connector, bool is_branch) +{ + u8 branch_caps[DP_DSC_BRANCH_CAP_SIZE]; + int line_width; + + connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 = INT_MAX; + connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 = INT_MAX; + connector->dp.dsc_branch_caps.max_line_width = INT_MAX; + + if (!is_branch) + return; + + if (drm_dp_dpcd_read_data(connector->dp.dsc_decompression_aux, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, branch_caps, + sizeof(branch_caps)) != 0) + return; + + connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 = + drm_dp_dsc_branch_max_overall_throughput(branch_caps, true) ? : INT_MAX; + + connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 = + drm_dp_dsc_branch_max_overall_throughput(branch_caps, false) ? : INT_MAX; + + line_width = drm_dp_dsc_branch_max_line_width(branch_caps); + connector->dp.dsc_branch_caps.max_line_width = line_width > 0 ? 
line_width : INT_MAX; +} + +void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, + const struct drm_dp_desc *desc, bool is_branch, + struct intel_connector *connector) { struct intel_display *display = to_intel_display(connector); @@ -4182,6 +4293,9 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector) /* Clear fec_capable to avoid using stale values */ connector->dp.fec_capability = 0; + memset(&connector->dp.dsc_branch_caps, 0, sizeof(connector->dp.dsc_branch_caps)); + connector->dp.dsc_throughput_quirk = false; + if (dpcd_rev < DP_DPCD_REV_14) return; @@ -4196,6 +4310,19 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector) drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n", connector->dp.fec_capability); + + if (!(connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED)) + return; + + init_dsc_overall_throughput_limits(connector, is_branch); + + /* + * TODO: Move the HW rev check as well to the DRM core quirk table if + * that's required after clarifying the list of affected devices. + */ + if (drm_dp_has_quirk(desc, DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) && + desc->ident.hw_rev == 0x10) + connector->dp.dsc_throughput_quirk = true; } static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector) @@ -4204,6 +4331,9 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector * return; intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd); + + if (connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED) + init_dsc_overall_throughput_limits(connector, false); } static void @@ -4220,6 +4350,7 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn connector); else intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV], + &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd), connector); } @@ -5553,7 +5684,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) if (intel_alpm_get_error(intel_dp)) { intel_alpm_disable(intel_dp); - intel_dp->alpm_parameters.sink_alpm_error = true; + intel_dp->alpm.sink_alpm_error = true; } if (intel_dp_test_short_pulse(intel_dp)) @@ -5921,6 +6052,8 @@ intel_dp_detect(struct drm_connector *_connector, memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd)); intel_dp->psr.sink_panel_replay_support = false; intel_dp->psr.sink_panel_replay_su_support = false; + intel_dp->psr.sink_panel_replay_dsc_support = + INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED; intel_dp_mst_disconnect(intel_dp); @@ -6857,3 +6990,81 @@ void intel_dp_mst_resume(struct intel_display *display) } } } + +static +int intel_dp_sdp_compute_config_late(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + int guardband = intel_crtc_vblank_length(crtc_state); + int min_sdp_guardband = intel_dp_sdp_min_guardband(crtc_state, false); + + if (guardband < min_sdp_guardband) { + drm_dbg_kms(display->drm, "guardband %d < min sdp guardband %d\n", + guardband, min_sdp_guardband); + return -EINVAL; + } + + return 0; +} + +int intel_dp_compute_config_late(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + int ret; + + intel_psr_compute_config_late(intel_dp, crtc_state); + + ret = intel_dp_sdp_compute_config_late(crtc_state); + if (ret) + return ret; + + return 0; +} + +static +int intel_dp_get_lines_for_sdp(const struct intel_crtc_state *crtc_state, u32 type) +{ + switch (type) { 
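/*
 * Illustrative aside on how the values in the switch below are used (the
 * numbers in the worked example are assumed, not taken from the patch):
 * each case returns how many vblank scanlines of guardband that SDP type
 * needs, intel_dp_sdp_min_guardband() takes the maximum over the SDP
 * types that are enabled (or, with assume_all_enabled, could be enabled),
 * and intel_dp_sdp_compute_config_late() fails the config if
 * intel_crtc_vblank_length() is smaller than that minimum.
 *
 * Worked example with assumed numbers: DSC enabled (PPS SDP -> 7 lines)
 * plus the Adaptive-Sync SDP with crtc_state->vrr.vsync_start == 12
 * (-> 12 + 1 = 13 lines) gives a minimum guardband of max(7, 13) = 13;
 * a guardband of, say, 10 lines would make the late compute step return
 * -EINVAL.
 */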
+ case DP_SDP_VSC_EXT_VESA: + case DP_SDP_VSC_EXT_CEA: + return 10; + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return 8; + case DP_SDP_PPS: + return 7; + case DP_SDP_ADAPTIVE_SYNC: + return crtc_state->vrr.vsync_start + 1; + default: + break; + } + + return 0; +} + +int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state, + bool assume_all_enabled) +{ + struct intel_display *display = to_intel_display(crtc_state); + int sdp_guardband = 0; + + if (assume_all_enabled || + crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA)) + sdp_guardband = max(sdp_guardband, + intel_dp_get_lines_for_sdp(crtc_state, + HDMI_PACKET_TYPE_GAMUT_METADATA)); + + if (assume_all_enabled || + crtc_state->dsc.compression_enable) + sdp_guardband = max(sdp_guardband, + intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_PPS)); + + if ((assume_all_enabled && HAS_AS_SDP(display)) || + crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC)) + sdp_guardband = max(sdp_guardband, + intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_ADAPTIVE_SYNC)); + + return sdp_guardband; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index f90cfd1dbbd0..200a8b267f64 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -12,6 +12,7 @@ enum intel_output_format; enum pipe; enum port; struct drm_connector_state; +struct drm_dp_desc; struct drm_dp_vsc_sdp; struct drm_encoder; struct drm_modeset_acquire_ctx; @@ -72,6 +73,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder); int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); +bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state, + bool dsc_enabled_on_crtc); int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state, @@ -199,7 +202,9 @@ bool intel_dp_compute_config_limits(struct intel_dp *intel_dp, bool dsc, struct link_config_limits *limits); -void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector); +void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, + const struct drm_dp_desc *desc, bool is_branch, + struct intel_connector *connector); bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder); bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, @@ -215,5 +220,10 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state, int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector); void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external); bool intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state); +int intel_dp_compute_config_late(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state); +int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state, + bool assume_all_enabled); #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 829a7c0fbe4f..809799f63e32 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -5,9 +5,9 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_dp_aux.h" #include 
"intel_dp_aux_regs.h" @@ -62,9 +62,9 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) u32 status; int ret; - ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, - 0, - 2, timeout_ms, &status); + ret = intel_de_wait_ms(display, ch_ctl, + DP_AUX_CH_CTL_SEND_BUSY, 0, + timeout_ms, &status); if (ret == -ETIMEDOUT) drm_err(display->drm, diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index bd757db85927..14ed0ea22dd3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -782,9 +782,9 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector, return -EINVAL; /* Wait for encryption confirmation */ - if (intel_de_wait(display, HDCP_STATUS(display, cpu_transcoder, port), - stream_enc_status, enable ? stream_enc_status : 0, - HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { + if (intel_de_wait_ms(display, HDCP_STATUS(display, cpu_transcoder, port), + stream_enc_status, enable ? stream_enc_status : 0, + HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS, NULL)) { drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", transcoder_name(cpu_transcoder), str_enabled_disabled(enable)); return -ETIMEDOUT; @@ -821,10 +821,10 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, return ret; /* Wait for encryption confirmation */ - if (intel_de_wait(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe), - STREAM_ENCRYPTION_STATUS, - enable ? STREAM_ENCRYPTION_STATUS : 0, - HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { + if (intel_de_wait_ms(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe), + STREAM_ENCRYPTION_STATUS, + enable ? STREAM_ENCRYPTION_STATUS : 0, + HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS, NULL)) { drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", transcoder_name(cpu_transcoder), str_enabled_disabled(enable)); return -ETIMEDOUT; diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 27f3716bdc1f..aad5fe14962f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -27,9 +27,10 @@ #include <drm/display/drm_dp_helper.h> #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_display_core.h" +#include "intel_display_jiffies.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_encoder.h" diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 352f7ef29c28..4c0b943fe86f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -33,7 +33,6 @@ #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> -#include "i915_utils.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" @@ -43,6 +42,7 @@ #include "intel_display_driver.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_dp_hdcp.h" #include "intel_dp_link_training.h" @@ -293,12 +293,22 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp, mst_stream_update_slots(crtc_state, mst_state); } - if (dsc) { - if (!intel_dp_supports_fec(intel_dp, connector, crtc_state)) - return -EINVAL; - - crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state); - } + /* + * 
NOTE: The following must reset crtc_state->fec_enable for UHBR/DSC + * after it was set by intel_dp_dsc_compute_config() -> + * intel_dp_needs_8b10b_fec(). + */ + crtc_state->fec_enable = intel_dp_needs_8b10b_fec(crtc_state, dsc); + /* + * If FEC gets enabled only because of another compressed stream, FEC + * may not be supported for this uncompressed stream on the whole link + * path until the sink DPRX. In this case a downstream branch device + * will disable FEC for the uncompressed stream as expected and so the + * FEC support doesn't need to be checked for this uncompressed stream. + */ + if (crtc_state->fec_enable && dsc && + !intel_dp_supports_fec(intel_dp, connector, crtc_state)) + return -EINVAL; max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc)); if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) { @@ -811,14 +821,14 @@ static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state, return mask; } -static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state, +static int intel_dp_mst_check_dsc_change(struct intel_atomic_state *state, struct drm_dp_mst_topology_mgr *mst_mgr, struct intel_link_bw_limits *limits) { struct intel_display *display = to_intel_display(state); struct intel_crtc *crtc; u8 mst_pipe_mask; - u8 fec_pipe_mask = 0; + u8 dsc_pipe_mask = 0; int ret; mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL); @@ -831,16 +841,16 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state, if (drm_WARN_ON(display->drm, !crtc_state)) return -EINVAL; - if (crtc_state->fec_enable) - fec_pipe_mask |= BIT(crtc->pipe); + if (intel_dsc_enabled_on_link(crtc_state)) + dsc_pipe_mask |= BIT(crtc->pipe); } - if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask) + if (!dsc_pipe_mask || mst_pipe_mask == dsc_pipe_mask) return 0; - limits->force_fec_pipes |= mst_pipe_mask; + limits->link_dsc_pipes |= mst_pipe_mask; - ret = intel_modeset_pipes_in_mask_early(state, "MST FEC", + ret = intel_modeset_pipes_in_mask_early(state, "MST DSC", mst_pipe_mask); return ret ? 
: -EAGAIN; @@ -894,7 +904,7 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state, int i; for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) { - ret = intel_dp_mst_check_fec_change(state, mgr, limits); + ret = intel_dp_mst_check_dsc_change(state, mgr, limits); if (ret) return ret; @@ -1658,6 +1668,7 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector) { u8 dpcd_caps[DP_RECEIVER_CAP_SIZE]; + struct drm_dp_desc desc; if (!connector->dp.dsc_decompression_aux) return; @@ -1665,7 +1676,13 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp, if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0) return; - intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector); + if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, &desc, + drm_dp_is_branch(dpcd_caps)) < 0) + return; + + intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], + &desc, drm_dp_is_branch(dpcd_caps), + connector); } static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector) diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 3f77ad92c156..8027bab2951b 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -24,13 +24,13 @@ #include <drm/drm_print.h> #include "bxt_dpio_phy_regs.h" -#include "i915_utils.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_power_well.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_dpio_phy.h" #include "vlv_dpio_phy_regs.h" @@ -390,7 +390,7 @@ static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy) static void bxt_phy_wait_grc_done(struct intel_display *display, enum dpio_phy phy) { - if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10)) + if (intel_de_wait_for_set_ms(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10)) drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy); } @@ -427,7 +427,7 @@ static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy) * The flag should get set in 100us according to the HW team, but * use 1ms due to occasional timeouts observed with that. 
*/ - if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy), + if (intel_de_wait_ms(display, BXT_PORT_CL1CM_DW0(phy), PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1, NULL)) drm_err(display->drm, "timeout during PHY%d power on\n", phy); @@ -1173,6 +1173,7 @@ void vlv_wait_port_ready(struct intel_encoder *encoder, struct intel_display *display = to_intel_display(encoder); u32 port_mask; i915_reg_t dpll_reg; + u32 val; switch (encoder->port) { default: @@ -1193,10 +1194,9 @@ void vlv_wait_port_ready(struct intel_encoder *encoder, break; } - if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000)) + if (intel_de_wait_ms(display, dpll_reg, port_mask, expected_mask, 1000, &val)) drm_WARN(display->drm, 1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", encoder->base.base.id, encoder->base.name, - intel_de_read(display, dpll_reg) & port_mask, - expected_mask); + val & port_mask, expected_mask); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index f969c5399a51..4f1db8493a2e 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -17,6 +17,7 @@ #include "intel_display_types.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" +#include "intel_lt_phy.h" #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_panel.h" @@ -1232,6 +1233,28 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state, return 0; } +static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_encoder *encoder = + intel_get_crtc_new_encoder(state, crtc_state); + int ret; + + ret = intel_lt_phy_pll_calc_state(crtc_state, encoder); + if (ret) + return ret; + + /* TODO: Do the readback via intel_compute_shared_dplls() */ + crtc_state->port_clock = + intel_lt_phy_calc_port_clock(encoder, crtc_state); + + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + + return 0; +} + static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); @@ -1691,6 +1714,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, return 0; } +static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = { + .crtc_compute_clock = xe3plpd_crtc_compute_clock, +}; + static const struct intel_dpll_global_funcs mtl_dpll_funcs = { .crtc_compute_clock = mtl_crtc_compute_clock, }; @@ -1789,7 +1816,9 @@ int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state, void intel_dpll_init_clock_hook(struct intel_display *display) { - if (DISPLAY_VER(display) >= 14) + if (HAS_LT_PHY(display)) + display->funcs.dpll = &xe3plpd_dpll_funcs; + else if (DISPLAY_VER(display) >= 14) display->funcs.dpll = &mtl_dpll_funcs; else if (display->platform.dg2) display->funcs.dpll = &dg2_dpll_funcs; @@ -1990,7 +2019,7 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state) intel_de_posting_read(display, DPLL(display, pipe)); udelay(150); - if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1)) + if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1)) drm_err(display->drm, "DPLL %d failed to lock\n", pipe); } @@ -2136,7 +2165,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) intel_de_write(display, DPLL(display, pipe), hw_state->dpll); /* Check PLL is locked */ - if 
(intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1)) + if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1)) drm_err(display->drm, "PLL %d failed to lock\n", pipe); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 8ea96cc524a1..9c7cf03cf022 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -27,11 +27,11 @@ #include <drm/drm_print.h> #include "bxt_dpio_phy_regs.h" -#include "i915_utils.h" #include "intel_cx0_phy.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" #include "intel_dpio_phy.h" @@ -1395,7 +1395,7 @@ static void skl_ddi_pll_enable(struct intel_display *display, /* the enable bit is always bit 31 */ intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE); - if (intel_de_wait_for_set(display, DPLL_STATUS, DPLL_LOCK(id), 5)) + if (intel_de_wait_for_set_ms(display, DPLL_STATUS, DPLL_LOCK(id), 5)) drm_err(display->drm, "DPLL %d not locked\n", id); } @@ -2057,9 +2057,9 @@ static void bxt_ddi_pll_enable(struct intel_display *display, intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_POWER_ENABLE); - ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port), - PORT_PLL_POWER_STATE, PORT_PLL_POWER_STATE, - 200, 0, NULL); + ret = intel_de_wait_for_set_us(display, + BXT_PORT_PLL_ENABLE(port), + PORT_PLL_POWER_STATE, 200); if (ret) drm_err(display->drm, "Power state not set for PLL:%d\n", port); @@ -2122,9 +2122,8 @@ static void bxt_ddi_pll_enable(struct intel_display *display, intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE); intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port)); - ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port), - PORT_PLL_LOCK, PORT_PLL_LOCK, - 200, 0, NULL); + ret = intel_de_wait_for_set_us(display, BXT_PORT_PLL_ENABLE(port), + PORT_PLL_LOCK, 200); if (ret) drm_err(display->drm, "PLL %d not locked\n", port); @@ -2158,9 +2157,9 @@ static void bxt_ddi_pll_disable(struct intel_display *display, intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_POWER_ENABLE, 0); - ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port), - PORT_PLL_POWER_STATE, 0, - 200, 0, NULL); + ret = intel_de_wait_for_clear_us(display, + BXT_PORT_PLL_ENABLE(port), + PORT_PLL_POWER_STATE, 200); if (ret) drm_err(display->drm, "Power state not reset for PLL:%d\n", port); @@ -3921,7 +3920,7 @@ static void icl_pll_power_enable(struct intel_display *display, * The spec says we need to "wait" but it also says it should be * immediate. */ - if (intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1)) + if (intel_de_wait_for_set_ms(display, enable_reg, PLL_POWER_STATE, 1)) drm_err(display->drm, "PLL %d Power not enabled\n", pll->info->id); } @@ -3933,7 +3932,7 @@ static void icl_pll_enable(struct intel_display *display, intel_de_rmw(display, enable_reg, 0, PLL_ENABLE); /* Timeout is actually 600us. */ - if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1)) + if (intel_de_wait_for_set_ms(display, enable_reg, PLL_LOCK, 1)) drm_err(display->drm, "PLL %d not locked\n", pll->info->id); } @@ -4046,7 +4045,7 @@ static void icl_pll_disable(struct intel_display *display, intel_de_rmw(display, enable_reg, PLL_ENABLE, 0); /* Timeout is actually 1us. 
*/ - if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 1)) + if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_LOCK, 1)) drm_err(display->drm, "PLL %d locked\n", pll->info->id); /* DVFS post sequence would be here. See the comment above. */ @@ -4057,7 +4056,7 @@ static void icl_pll_disable(struct intel_display *display, * The spec says we need to "wait" but it also says it should be * immediate. */ - if (intel_de_wait_for_clear(display, enable_reg, PLL_POWER_STATE, 1)) + if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_POWER_STATE, 1)) drm_err(display->drm, "PLL %d Power not disabled\n", pll->info->id); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index f131bdd1c975..6183da90b28d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -267,6 +267,16 @@ struct intel_cx0pll_state { bool tbt_mode; }; +struct intel_lt_phy_pll_state { + u32 clock; /* in kHz */ + u8 addr_msb[13]; + u8 addr_lsb[13]; + u8 data[13][4]; + u8 config[3]; + bool ssc_enabled; + bool tbt_mode; +}; + struct intel_dpll_hw_state { union { struct i9xx_dpll_hw_state i9xx; @@ -276,6 +286,7 @@ struct intel_dpll_hw_state { struct icl_dpll_hw_state icl; struct intel_mpllb_state mpllb; struct intel_cx0pll_state cx0pll; + struct intel_lt_phy_pll_state ltpll; }; }; diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index c0a817018d08..58d953472218 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -3,6 +3,8 @@ * Copyright © 2021 Intel Corporation */ +#include <drm/drm_print.h> + #include "gem/i915_gem_domain.h" #include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index dee44d45b668..4ad4efbf9253 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -115,24 +115,6 @@ static bool pre_commit_is_vrr_active(struct intel_atomic_state *state, return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc); } -static int dsb_vblank_delay(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - const struct intel_crtc_state *crtc_state = - intel_pre_commit_crtc_state(state, crtc); - - if (pre_commit_is_vrr_active(state, crtc)) - /* - * When the push is sent during vblank it will trigger - * on the next scanline, hence we have up to one extra - * scanline until the delayed vblank occurs after - * TRANS_PUSH has been written. - */ - return intel_vrr_vblank_delay(crtc_state) + 1; - else - return intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode); -} - static int dsb_vtotal(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -723,7 +705,7 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state, intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT, 0, 0); if (pre_commit_is_vrr_active(state, crtc)) { - int vblank_delay = intel_vrr_vblank_delay(crtc_state); + int vblank_delay = crtc_state->set_context_latency; end = intel_vrr_vmin_vblank_start(crtc_state); start = end - vblank_delay - latency; @@ -815,16 +797,43 @@ void intel_dsb_chain(struct intel_atomic_state *state, wait_for_vblank ? 
DSB_WAIT_FOR_VBLANK : 0); } -void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state, - struct intel_dsb *dsb) +void intel_dsb_wait_for_delayed_vblank(struct intel_atomic_state *state, + struct intel_dsb *dsb) { struct intel_crtc *crtc = dsb->crtc; const struct intel_crtc_state *crtc_state = intel_pre_commit_crtc_state(state, crtc); - int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode, - dsb_vblank_delay(state, crtc)); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int wait_scanlines; + + if (pre_commit_is_vrr_active(state, crtc)) { + /* + * If the push happened before the vmin decision boundary + * we don't know how far we are from the undelayed vblank. + * Wait until we're past the vmin safe window, at which + * point we're SCL lines away from the delayed vblank. + * + * If the push happened after the vmin decision boundary + * the hardware itself guarantees that we're SCL lines + * away from the delayed vblank, and we won't be inside + * the vmin safe window so this extra wait does nothing. + */ + intel_dsb_wait_scanline_out(state, dsb, + intel_vrr_safe_window_start(crtc_state), + intel_vrr_vmin_safe_window_end(crtc_state)); + /* + * When the push is sent during vblank it will trigger + * on the next scanline, hence we have up to one extra + * scanline until the delayed vblank occurs after + * TRANS_PUSH has been written. + */ + wait_scanlines = crtc_state->set_context_latency + 1; + } else { + wait_scanlines = intel_mode_vblank_delay(adjusted_mode); + } - intel_dsb_wait_usec(dsb, usecs); + intel_dsb_wait_usec(dsb, intel_scanlines_to_usecs(adjusted_mode, wait_scanlines)); } /** diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index c8f4499916eb..2f31f2c1d0c5 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -48,8 +48,8 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb); void intel_dsb_interrupt(struct intel_dsb *dsb); void intel_dsb_wait_usec(struct intel_dsb *dsb, int count); void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count); -void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state, - struct intel_dsb *dsb); +void intel_dsb_wait_for_delayed_vblank(struct intel_atomic_state *state, + struct intel_dsb *dsb); void intel_dsb_wait_scanline_in(struct intel_atomic_state *state, struct intel_dsb *dsb, int lower, int upper); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 23402408e172..4b815ce6b1fe 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -38,10 +38,10 @@ #include <drm/drm_print.h> #include <video/mipi_display.h> -#include "i915_utils.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "intel_gmbus_regs.h" @@ -106,8 +106,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 type, flags, seq_port; u16 len; enum port port; - - drm_dbg_kms(display->drm, "\n"); + ssize_t ret; + bool hs_mode; flags = *data++; type = *data++; @@ -129,45 +129,56 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, goto out; } - if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1) + hs_mode = (flags >> MIPI_TRANSFER_MODE_SHIFT) & 1; + if (hs_mode) dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; else dsi_device->mode_flags |= MIPI_DSI_MODE_LPM; 
dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3; + drm_dbg_kms(display->drm, "DSI packet: Port %c (seq %u), Flags 0x%02x, VC %u, %s, Type 0x%02x, Length %u, Data %*ph\n", + port_name(port), seq_port, flags, dsi_device->channel, + hs_mode ? "HS" : "LP", type, len, (int)len, data); + switch (type) { case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: - mipi_dsi_generic_write(dsi_device, NULL, 0); + ret = mipi_dsi_generic_write(dsi_device, NULL, 0); break; case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: - mipi_dsi_generic_write(dsi_device, data, 1); + ret = mipi_dsi_generic_write(dsi_device, data, 1); break; case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: - mipi_dsi_generic_write(dsi_device, data, 2); + ret = mipi_dsi_generic_write(dsi_device, data, 2); break; case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: - drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n"); + ret = -EOPNOTSUPP; break; case MIPI_DSI_GENERIC_LONG_WRITE: - mipi_dsi_generic_write(dsi_device, data, len); + ret = mipi_dsi_generic_write(dsi_device, data, len); break; case MIPI_DSI_DCS_SHORT_WRITE: - mipi_dsi_dcs_write_buffer(dsi_device, data, 1); + ret = mipi_dsi_dcs_write_buffer(dsi_device, data, 1); break; case MIPI_DSI_DCS_SHORT_WRITE_PARAM: - mipi_dsi_dcs_write_buffer(dsi_device, data, 2); + ret = mipi_dsi_dcs_write_buffer(dsi_device, data, 2); break; case MIPI_DSI_DCS_READ: - drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n"); + ret = -EOPNOTSUPP; break; case MIPI_DSI_DCS_LONG_WRITE: - mipi_dsi_dcs_write_buffer(dsi_device, data, len); + ret = mipi_dsi_dcs_write_buffer(dsi_device, data, len); + break; + default: + ret = -EINVAL; break; } + if (ret < 0) + drm_err(display->drm, "DSI send packet failed with %pe\n", ERR_PTR(ret)); + if (DISPLAY_VER(display) < 11) vlv_dsi_wait_for_fifo_empty(intel_dsi, port); diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 08b48e36aca6..c2663d6e2c92 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -34,12 +34,12 @@ #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> -#include "i915_utils.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_driver.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dvo.h" #include "intel_dvo_dev.h" #include "intel_dvo_regs.h" diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index c48384e58ea1..b34b4961fe1c 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -9,13 +9,13 @@ #include <drm/drm_blend.h> #include <drm/drm_gem.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> -#include "i915_drv.h" -#include "i915_utils.h" #include "intel_bo.h" #include "intel_display.h" #include "intel_display_core.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dpt.h" #include "intel_fb.h" #include "intel_fb_bo.h" @@ -547,8 +547,6 @@ static bool plane_has_modifier(struct intel_display *display, u8 plane_caps, const struct intel_modifier_desc *md) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (!IS_DISPLAY_VER(display, md->display_ver.from, md->display_ver.until)) return false; @@ -560,15 +558,15 @@ static bool plane_has_modifier(struct intel_display *display, * where supported. 
*/ if (intel_fb_is_ccs_modifier(md->modifier) && - HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes) + HAS_AUX_CCS(display) != !!md->ccs.packed_aux_planes) return false; if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS && - (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx)) + (DISPLAY_VER(display) < 14 || !display->platform.dgfx)) return false; if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS && - (GRAPHICS_VER(i915) < 20 || display->platform.dgfx)) + (DISPLAY_VER(display) < 20 || display->platform.dgfx)) return false; return true; @@ -777,7 +775,6 @@ unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) { struct intel_display *display = to_intel_display(fb->dev); - struct drm_i915_private *i915 = to_i915(display->drm); unsigned int cpp = fb->format->cpp[color_plane]; switch (fb->modifier) { @@ -814,7 +811,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) return 64; fallthrough; case I915_FORMAT_MOD_Y_TILED: - if (DISPLAY_VER(display) == 2 || HAS_128_BYTE_Y_TILING(i915)) + if (HAS_128B_Y_TILING(display)) return 128; else return 512; @@ -1329,7 +1326,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) * unclear in Bspec, for now no checking. */ stride = intel_fb_pitch(fb, 0, rotation); - max_stride = plane->max_stride(plane, fb->base.format->format, + max_stride = plane->max_stride(plane, fb->base.format, fb->base.modifier, rotation); return stride > max_stride; @@ -1975,7 +1972,8 @@ void intel_add_fb_offsets(int *x, int *y, static u32 intel_fb_max_stride(struct intel_display *display, - u32 pixel_format, u64 modifier) + const struct drm_format_info *info, + u64 modifier) { /* * Arbitrary limit for gen4+ chosen to match the @@ -1985,7 +1983,7 @@ u32 intel_fb_max_stride(struct intel_display *display, */ if (DISPLAY_VER(display) < 4 || intel_fb_is_ccs_modifier(modifier) || intel_fb_modifier_uses_dpt(display, modifier)) - return intel_plane_fb_max_stride(display->drm, pixel_format, modifier); + return intel_plane_fb_max_stride(display, info, modifier); else if (DISPLAY_VER(display) >= 7) return 256 * 1024; else @@ -1999,8 +1997,8 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) unsigned int tile_width; if (is_surface_linear(fb, color_plane)) { - unsigned int max_stride = intel_plane_fb_max_stride(display->drm, - fb->format->format, + unsigned int max_stride = intel_plane_fb_max_stride(display, + fb->format, fb->modifier); /* @@ -2058,7 +2056,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state) /* FIXME other color planes? 
*/ stride = plane_state->view.color_plane[0].mapping_stride; - max_stride = plane->max_stride(plane, fb->format->format, + max_stride = plane->max_stride(plane, fb->format, fb->modifier, rotation); if (stride > max_stride) { @@ -2197,7 +2195,6 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, return ret; flush: - intel_bo_flush_if_display(obj); intel_frontbuffer_flush(front, ORIGIN_DIRTYFB); return ret; } @@ -2233,28 +2230,28 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, goto err_free_panic; } - ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd); + ret = intel_fb_bo_framebuffer_init(obj, mode_cmd); if (ret) goto err_frontbuffer_put; - ret = -EINVAL; if (!drm_any_plane_has_format(display->drm, mode_cmd->pixel_format, mode_cmd->modifier[0])) { drm_dbg_kms(display->drm, "unsupported pixel format %p4cc / modifier 0x%llx\n", &mode_cmd->pixel_format, mode_cmd->modifier[0]); + ret = -EINVAL; goto err_bo_framebuffer_fini; } - max_stride = intel_fb_max_stride(display, mode_cmd->pixel_format, - mode_cmd->modifier[0]); + max_stride = intel_fb_max_stride(display, info, mode_cmd->modifier[0]); if (mode_cmd->pitches[0] > max_stride) { drm_dbg_kms(display->drm, "%s pitch (%u) must be at most %d\n", mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? "tiled" : "linear", mode_cmd->pitches[0], max_stride); + ret = -EINVAL; goto err_bo_framebuffer_fini; } @@ -2263,6 +2260,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, drm_dbg_kms(display->drm, "plane 0 offset (0x%08x) must be 0\n", mode_cmd->offsets[0]); + ret = -EINVAL; goto err_bo_framebuffer_fini; } @@ -2273,6 +2271,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, if (mode_cmd->handles[i] != mode_cmd->handles[0]) { drm_dbg_kms(display->drm, "bad plane %d handle\n", i); + ret = -EINVAL; goto err_bo_framebuffer_fini; } @@ -2281,6 +2280,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, drm_dbg_kms(display->drm, "plane %d pitch (%d) must be at least %u byte aligned\n", i, fb->pitches[i], stride_alignment); + ret = -EINVAL; goto err_bo_framebuffer_fini; } @@ -2291,6 +2291,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, drm_dbg_kms(display->drm, "ccs aux plane %d pitch (%d) must be %d\n", i, fb->pitches[i], ccs_aux_stride); + ret = -EINVAL; goto err_bo_framebuffer_fini; } } diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c index b0e8b89f7ce8..bfecd73d5fa0 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_bo.c +++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c @@ -4,6 +4,7 @@ */ #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include "gem/i915_gem_object.h" @@ -18,8 +19,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj) /* Nothing to do for i915 */ } -int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb, - struct drm_gem_object *_obj, +int intel_fb_bo_framebuffer_init(struct drm_gem_object *_obj, struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_i915_gem_object *obj = to_intel_bo(_obj); diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h index eefcb05a99f0..d775773c6c03 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_bo.h +++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h @@ -14,8 +14,7 @@ struct drm_mode_fb_cmd2; void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj); -int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb, - struct drm_gem_object *obj, +int intel_fb_bo_framebuffer_init(struct 
drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); struct drm_gem_object * diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 45af04cb0fb2..7249b784fbba 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -7,6 +7,8 @@ * DOC: display pinning helpers */ +#include <drm/drm_print.h> + #include "gem/i915_gem_domain.h" #include "gem/i915_gem_object.h" diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 0d380c825791..437d2fda20a7 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -43,23 +43,23 @@ #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include "gem/i915_gem_stolen.h" #include "gt/intel_gt_types.h" #include "i915_drv.h" -#include "i915_utils.h" #include "i915_vgpu.h" #include "i915_vma.h" #include "i9xx_plane_regs.h" -#include "intel_cdclk.h" #include "intel_de.h" #include "intel_display_device.h" #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_display_wa.h" #include "intel_fbc.h" #include "intel_fbc_regs.h" @@ -102,7 +102,8 @@ struct intel_fbc { struct mutex lock; unsigned int busy_bits; - struct i915_stolen_fb compressed_fb, compressed_llb; + struct intel_stolen_node *compressed_fb; + struct intel_stolen_node *compressed_llb; enum intel_fbc_id id; @@ -141,15 +142,18 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane return stride; } -static unsigned int intel_fbc_cfb_cpp(void) +static unsigned int intel_fbc_cfb_cpp(const struct intel_plane_state *plane_state) { - return 4; /* FBC always 4 bytes per pixel */ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int cpp = fb->format->cpp[0]; + + return max(cpp, 4); } /* plane stride based cfb stride in bytes, assuming 1:1 compression limit */ static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state) { - unsigned int cpp = intel_fbc_cfb_cpp(); + unsigned int cpp = intel_fbc_cfb_cpp(plane_state); return intel_fbc_plane_stride(plane_state) * cpp; } @@ -203,7 +207,7 @@ static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_s struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); unsigned int stride = intel_fbc_plane_cfb_stride(plane_state); unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16; - unsigned int cpp = intel_fbc_cfb_cpp(); + unsigned int cpp = intel_fbc_cfb_cpp(plane_state); return _intel_fbc_cfb_stride(display, cpp, width, stride); } @@ -324,8 +328,8 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc) intel_de_write(display, FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (intel_de_wait_for_clear(display, FBC_STATUS, - FBC_STAT_COMPRESSING, 10)) { + if (intel_de_wait_for_clear_ms(display, FBC_STATUS, + FBC_STAT_COMPRESSING, 10)) { drm_dbg_kms(display->drm, "FBC idle timed out\n"); return; } @@ -376,20 +380,19 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc) static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) { struct intel_display *display = fbc->display; - struct drm_i915_private *i915 = to_i915(display->drm); drm_WARN_ON(display->drm, - range_end_overflows_t(u64, i915_gem_stolen_area_address(i915), - i915_gem_stolen_node_offset(&fbc->compressed_fb), + 
range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm), + i915_gem_stolen_node_offset(fbc->compressed_fb), U32_MAX)); drm_WARN_ON(display->drm, - range_end_overflows_t(u64, i915_gem_stolen_area_address(i915), - i915_gem_stolen_node_offset(&fbc->compressed_llb), + range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm), + i915_gem_stolen_node_offset(fbc->compressed_llb), U32_MAX)); intel_de_write(display, FBC_CFB_BASE, - i915_gem_stolen_node_address(i915, &fbc->compressed_fb)); + i915_gem_stolen_node_address(fbc->compressed_fb)); intel_de_write(display, FBC_LL_BASE, - i915_gem_stolen_node_address(i915, &fbc->compressed_llb)); + i915_gem_stolen_node_address(fbc->compressed_llb)); } static const struct intel_fbc_funcs i8xx_fbc_funcs = { @@ -497,7 +500,7 @@ static void g4x_fbc_program_cfb(struct intel_fbc *fbc) struct intel_display *display = fbc->display; intel_de_write(display, DPFC_CB_BASE, - i915_gem_stolen_node_offset(&fbc->compressed_fb)); + i915_gem_stolen_node_offset(fbc->compressed_fb)); } static const struct intel_fbc_funcs g4x_fbc_funcs = { @@ -566,7 +569,7 @@ static void ilk_fbc_program_cfb(struct intel_fbc *fbc) struct intel_display *display = fbc->display; intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id), - i915_gem_stolen_node_offset(&fbc->compressed_fb)); + i915_gem_stolen_node_offset(fbc->compressed_fb)); } static const struct intel_fbc_funcs ilk_fbc_funcs = { @@ -797,7 +800,6 @@ static u64 intel_fbc_cfb_base_max(struct intel_display *display) static u64 intel_fbc_stolen_end(struct intel_display *display) { - struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm); u64 end; /* The FBC hardware for BDW/SKL doesn't have access to the stolen @@ -806,7 +808,7 @@ static u64 intel_fbc_stolen_end(struct intel_display *display) * underruns, even if that range is not reserved by the BIOS. */ if (display->platform.broadwell || (DISPLAY_VER(display) == 9 && !display->platform.broxton)) - end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024; + end = i915_gem_stolen_area_size(display->drm) - 8 * 1024 * 1024; else end = U64_MAX; @@ -835,20 +837,19 @@ static int find_compression_limit(struct intel_fbc *fbc, unsigned int size, int min_limit) { struct intel_display *display = fbc->display; - struct drm_i915_private *i915 = to_i915(display->drm); u64 end = intel_fbc_stolen_end(display); int ret, limit = min_limit; size /= limit; /* Try to over-allocate to reduce reallocations and fragmentation. 
*/ - ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, + ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb, size <<= 1, 4096, 0, end); if (ret == 0) return limit; for (; limit <= intel_fbc_max_limit(display); limit <<= 1) { - ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, + ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb, size >>= 1, 4096, 0, end); if (ret == 0) return limit; @@ -861,17 +862,15 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, unsigned int size, int min_limit) { struct intel_display *display = fbc->display; - struct drm_i915_private *i915 = to_i915(display->drm); int ret; drm_WARN_ON(display->drm, - i915_gem_stolen_node_allocated(&fbc->compressed_fb)); + i915_gem_stolen_node_allocated(fbc->compressed_fb)); drm_WARN_ON(display->drm, - i915_gem_stolen_node_allocated(&fbc->compressed_llb)); + i915_gem_stolen_node_allocated(fbc->compressed_llb)); if (DISPLAY_VER(display) < 5 && !display->platform.g4x) { - ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb, - 4096, 4096); + ret = i915_gem_stolen_insert_node(fbc->compressed_llb, 4096, 4096); if (ret) goto err; } @@ -887,14 +886,14 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, drm_dbg_kms(display->drm, "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", - i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit); + i915_gem_stolen_node_size(fbc->compressed_fb), fbc->limit); return 0; err_llb: - if (i915_gem_stolen_node_allocated(&fbc->compressed_llb)) - i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); + if (i915_gem_stolen_node_allocated(fbc->compressed_llb)) + i915_gem_stolen_remove_node(fbc->compressed_llb); err: - if (i915_gem_stolen_initialized(i915)) + if (i915_gem_stolen_initialized(display->drm)) drm_info_once(display->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; @@ -932,9 +931,12 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc) if (IS_DISPLAY_VER(display, 11, 12)) intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 0, DPFC_CHICKEN_COMP_DUMMY_PIXEL); - - /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */ - if (DISPLAY_VER(display) >= 11 && !display->platform.dg2) + /* + * Wa_22014263786 + * Fixes: Screen flicker with FBC and Package C state enabled + * Workaround: Forced SLB invalidation before start of new frame. 
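The find_compression_limit() hunk above keeps the existing CFB sizing strategy while switching to the new stolen-node handles: first try a 2x over-allocation to reduce later reallocations and fragmentation, then repeatedly halve the buffer and double the compression limit until the allocation fits or the platform maximum is exceeded. A rough standalone sketch of that search; try_alloc(), the stolen budget and MAX_LIMIT are made-up stand-ins for i915_gem_stolen_insert_node_in_range() and intel_fbc_max_limit(), not i915 API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_LIMIT 4				/* e.g. allow up to 4:1 compression */

static size_t stolen_free = 8 << 20;		/* pretend 8 MiB of stolen memory left */

static bool try_alloc(size_t size)
{
	return size <= stolen_free;		/* stand-in for the stolen allocator */
}

static int pick_compression_limit(size_t cfb_size, int min_limit)
{
	size_t size = cfb_size / min_limit;
	int limit = min_limit;

	if (try_alloc(size * 2))		/* over-allocate first */
		return limit;

	/* trade CFB size for a higher compression limit */
	for (; limit <= MAX_LIMIT; limit <<= 1, size >>= 1)
		if (try_alloc(size))
			return limit;

	return 0;				/* no fit: FBC stays disabled */
}

int main(void)
{
	/* 32 MiB of 1:1 CFB does not fit the pretend budget; 4:1 (8 MiB) does */
	printf("limit=%d\n", pick_compression_limit(32 << 20, 1));
	return 0;
}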
+ */ + if (intel_display_wa(display, 22014263786)) intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION); @@ -945,16 +947,13 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc) static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) { - struct intel_display *display = fbc->display; - struct drm_i915_private *i915 = to_i915(display->drm); - if (WARN_ON(intel_fbc_hw_is_active(fbc))) return; - if (i915_gem_stolen_node_allocated(&fbc->compressed_llb)) - i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); - if (i915_gem_stolen_node_allocated(&fbc->compressed_fb)) - i915_gem_stolen_remove_node(i915, &fbc->compressed_fb); + if (i915_gem_stolen_node_allocated(fbc->compressed_llb)) + i915_gem_stolen_remove_node(fbc->compressed_llb); + if (i915_gem_stolen_node_allocated(fbc->compressed_fb)) + i915_gem_stolen_remove_node(fbc->compressed_fb); } void intel_fbc_cleanup(struct intel_display *display) @@ -967,6 +966,9 @@ void intel_fbc_cleanup(struct intel_display *display) __intel_fbc_cleanup_cfb(fbc); mutex_unlock(&fbc->lock); + i915_gem_stolen_node_free(fbc->compressed_fb); + i915_gem_stolen_node_free(fbc->compressed_llb); + kfree(fbc); } } @@ -1083,11 +1085,57 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_ } } +static bool +xe3p_lpd_fbc_fp16_format_is_valid(const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->format->format) { + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_ABGR16161616F: + return true; + default: + return false; + } +} + +static bool xe3p_lpd_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + + if (lnl_fbc_pixel_format_is_valid(plane_state)) + return true; + + if (xe3p_lpd_fbc_fp16_format_is_valid(plane_state)) + return true; + + switch (fb->format->format) { + case DRM_FORMAT_XRGB16161616: + case DRM_FORMAT_XBGR16161616: + case DRM_FORMAT_ARGB16161616: + case DRM_FORMAT_ABGR16161616: + return true; + default: + return false; + } +} + +bool +intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state) +{ + struct intel_display *display = to_intel_display(plane_state); + + return DISPLAY_VER(display) >= 35 && + xe3p_lpd_fbc_fp16_format_is_valid(plane_state); +} + static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) { struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); - if (DISPLAY_VER(display) >= 20) + if (DISPLAY_VER(display) >= 35) + return xe3p_lpd_fbc_pixel_format_is_valid(plane_state); + else if (DISPLAY_VER(display) >= 20) return lnl_fbc_pixel_format_is_valid(plane_state); else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) return g4x_fbc_pixel_format_is_valid(plane_state); @@ -1355,7 +1403,7 @@ static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state) return intel_fbc_min_limit(plane_state) <= fbc->limit && intel_fbc_cfb_size(plane_state) <= fbc->limit * - i915_gem_stolen_node_size(&fbc->compressed_fb); + i915_gem_stolen_node_size(fbc->compressed_fb); } static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state) @@ -1421,6 +1469,18 @@ intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state, } } +static int _intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + /* WaFbcExceedCdClockThreshold:hsw,bdw */ + if (display->platform.haswell 
|| display->platform.broadwell) + return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95); + + /* no FBC specific limits to worry about */ + return 0; +} + static int intel_fbc_check_plane(struct intel_atomic_state *state, struct intel_plane *plane) { @@ -1436,7 +1496,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, if (!fbc) return 0; - if (!i915_gem_stolen_initialized(i915)) { + if (!i915_gem_stolen_initialized(display->drm)) { plane_state->no_fbc_reason = "stolen memory not initialised"; return 0; } @@ -1462,7 +1522,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, } /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ - if (i915_vtd_active(i915) && (display->platform.skylake || display->platform.broxton)) { + if (intel_display_vtd_active(display) && + (display->platform.skylake || display->platform.broxton)) { plane_state->no_fbc_reason = "VT-d enabled"; return 0; } @@ -1560,18 +1621,9 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } - /* WaFbcExceedCdClockThreshold:hsw,bdw */ - if (display->platform.haswell || display->platform.broadwell) { - const struct intel_cdclk_state *cdclk_state; - - cdclk_state = intel_atomic_get_cdclk_state(state); - if (IS_ERR(cdclk_state)) - return PTR_ERR(cdclk_state); - - if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) { - plane_state->no_fbc_reason = "pixel rate too high"; - return 0; - } + if (_intel_fbc_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq) { + plane_state->no_fbc_reason = "pixel rate too high"; + return 0; } plane_state->no_fbc_reason = NULL; @@ -1579,6 +1631,27 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } +int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + int min_cdclk; + + if (!plane->fbc) + return 0; + + min_cdclk = _intel_fbc_min_cdclk(crtc_state); + + /* + * Do not ask for more than the max CDCLK frequency, + * if that is not enough FBC will simply not be used. 
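The new _intel_fbc_min_cdclk() helper above restates WaFbcExceedCdClockThreshold:hsw,bdw as the minimum cdclk FBC needs, i.e. cdclk >= pixel_rate / 0.95 expressed as DIV_ROUND_UP(pixel_rate * 100, 95), rather than the old open-coded comparison against the logical cdclk. A quick check of that arithmetic; the pixel rate below is only an illustrative 4k-class mode, not taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pixel_rate = 533250;	/* kHz, roughly a 4k@60 mode (illustrative) */
	unsigned int min_cdclk = DIV_ROUND_UP(pixel_rate * 100u, 95u);

	/* 561316 kHz: if max_cdclk_freq is below this, FBC is simply skipped */
	printf("FBC needs cdclk >= %u kHz\n", min_cdclk);
	return 0;
}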
+ */ + if (min_cdclk > display->cdclk.max_cdclk_freq) + return 0; + + return min_cdclk; +} static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state, struct intel_crtc *crtc, @@ -2083,6 +2156,13 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display, if (!fbc) return NULL; + fbc->compressed_fb = i915_gem_stolen_node_alloc(display->drm); + if (!fbc->compressed_fb) + goto err; + fbc->compressed_llb = i915_gem_stolen_node_alloc(display->drm); + if (!fbc->compressed_llb) + goto err; + fbc->id = fbc_id; fbc->display = display; INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); @@ -2102,6 +2182,13 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display, fbc->funcs = &i8xx_fbc_funcs; return fbc; + +err: + i915_gem_stolen_node_free(fbc->compressed_llb); + i915_gem_stolen_node_free(fbc->compressed_fb); + kfree(fbc); + + return NULL; } /** diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index 0e715cb6b4e6..91424563206a 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -28,6 +28,7 @@ enum intel_fbc_id { }; int intel_fbc_atomic_check(struct intel_atomic_state *state); +int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state); bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_post_update(struct intel_atomic_state *state, @@ -52,5 +53,7 @@ void intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb, struct intel_plane *plane); +bool +intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state); #endif /* __INTEL_FBC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 7c4709d58aa3..9cd03e2adeb2 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -146,8 +146,6 @@ static void intel_fbdev_fb_destroy(struct fb_info *info) drm_framebuffer_remove(fb_helper->fb); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } __diag_push(); @@ -207,14 +205,70 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_set_suspend = intelfb_set_suspend, }; +static void intel_fbdev_fill_mode_cmd(struct drm_fb_helper_surface_size *sizes, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + /* we don't do packed 24bpp */ + if (sizes->surface_bpp == 24) + sizes->surface_bpp = 32; + + mode_cmd->flags = DRM_MODE_FB_MODIFIERS; + mode_cmd->width = sizes->surface_width; + mode_cmd->height = sizes->surface_height; + + mode_cmd->pitches[0] = intel_fbdev_fb_pitch_align(mode_cmd->width * DIV_ROUND_UP(sizes->surface_bpp, 8)); + mode_cmd->pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + mode_cmd->modifier[0] = DRM_FORMAT_MOD_LINEAR; +} + +static struct intel_framebuffer * +__intel_fbdev_fb_alloc(struct intel_display *display, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_mode_fb_cmd2 mode_cmd = {}; + struct drm_framebuffer *fb; + struct drm_gem_object *obj; + int size; + + intel_fbdev_fill_mode_cmd(sizes, &mode_cmd); + + size = mode_cmd.pitches[0] * mode_cmd.height; + size = PAGE_ALIGN(size); + + obj = intel_fbdev_fb_bo_create(display->drm, size); + if (IS_ERR(obj)) { + fb = ERR_CAST(obj); + goto err; + } + + fb = intel_framebuffer_create(obj, + drm_get_format_info(display->drm, + 
mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd); + if (IS_ERR(fb)) { + intel_fbdev_fb_bo_destroy(obj); + goto err; + } + + drm_gem_object_put(obj); + + return to_intel_framebuffer(fb); + +err: + return ERR_CAST(fb); + +} + int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct intel_display *display = to_intel_display(helper->dev); struct intel_fbdev *ifbdev = to_intel_fbdev(helper); struct intel_framebuffer *fb = ifbdev->fb; + struct fb_info *info = helper->info; struct ref_tracker *wakeref; - struct fb_info *info; struct i915_vma *vma; unsigned long flags = 0; bool prealloc = false; @@ -237,7 +291,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) { drm_dbg_kms(display->drm, "no BIOS fb, allocating a new one\n"); - fb = intel_fbdev_fb_alloc(helper, sizes); + + fb = __intel_fbdev_fb_alloc(display, sizes); if (IS_ERR(fb)) return PTR_ERR(fb); } else { @@ -263,13 +318,6 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto out_unlock; } - info = drm_fb_helper_alloc_info(helper); - if (IS_ERR(info)) { - drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info); - ret = PTR_ERR(info); - goto out_unpin; - } - helper->funcs = &intel_fb_helper_funcs; helper->fb = &fb->base; @@ -277,7 +325,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, obj = intel_fb_bo(&fb->base); - ret = intel_fbdev_fb_fill_info(display, info, obj, vma); + ret = intel_fbdev_fb_fill_info(display->drm, info, obj, vma); if (ret) goto out_unpin; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c index 210aee9ae88b..c3202ba141c5 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c @@ -3,40 +3,24 @@ * Copyright © 2023 Intel Corporation */ -#include <drm/drm_fb_helper.h> +#include <linux/fb.h> + +#include <drm/drm_print.h> #include "gem/i915_gem_lmem.h" #include "i915_drv.h" -#include "intel_display_core.h" -#include "intel_display_types.h" -#include "intel_fb.h" #include "intel_fbdev_fb.h" -struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +u32 intel_fbdev_fb_pitch_align(u32 stride) { - struct intel_display *display = to_intel_display(helper->dev); - struct drm_i915_private *dev_priv = to_i915(display->drm); - struct drm_framebuffer *fb; - struct drm_mode_fb_cmd2 mode_cmd = {}; - struct drm_i915_gem_object *obj; - int size; - - /* we don't do packed 24bpp */ - if (sizes->surface_bpp == 24) - sizes->surface_bpp = 32; - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - - mode_cmd.pitches[0] = ALIGN(mode_cmd.width * - DIV_ROUND_UP(sizes->surface_bpp, 8), 64); - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); + return ALIGN(stride, 64); +} - size = mode_cmd.pitches[0] * mode_cmd.height; - size = PAGE_ALIGN(size); +struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size) +{ + struct drm_i915_private *dev_priv = to_i915(drm); + struct drm_i915_gem_object *obj; obj = ERR_PTR(-ENODEV); if (HAS_LMEM(dev_priv)) { @@ -51,31 +35,29 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, * * Also skip stolen on MTL as Wa_22018444074 mitigation. 
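intel_fbdev_fill_mode_cmd() and intel_fbdev_fb_pitch_align() above split the fbdev surface sizing out of the old intel_fbdev_fb_alloc(): packed 24bpp is promoted to 32bpp, the pitch is aligned to 64 bytes, and the BO size is the page-aligned pitch * height. A self-contained sketch of that computation; ALIGN()/PAGE_ALIGN() are redefined here only to keep the example standalone, and the 1920x1080 numbers are illustrative:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	ALIGN(x, PAGE_SIZE)

int main(void)
{
	unsigned int width = 1920, height = 1080, bpp = 24;

	if (bpp == 24)			/* we don't do packed 24bpp */
		bpp = 32;

	unsigned int pitch = ALIGN(width * ((bpp + 7) / 8), 64);
	unsigned int size = PAGE_ALIGN(pitch * height);

	/* pitch=7680 size=8294400 (already page aligned for this mode) */
	printf("pitch=%u size=%u\n", pitch, size);
	return 0;
}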
*/ - if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size) + if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size) obj = i915_gem_object_create_stolen(dev_priv, size); if (IS_ERR(obj)) obj = i915_gem_object_create_shmem(dev_priv, size); } if (IS_ERR(obj)) { - drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj); + drm_err(drm, "failed to allocate framebuffer (%pe)\n", obj); return ERR_PTR(-ENOMEM); } - fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), - drm_get_format_info(display->drm, - mode_cmd.pixel_format, - mode_cmd.modifier[0]), - &mode_cmd); - i915_gem_object_put(obj); + return &obj->base; +} - return to_intel_framebuffer(fb); +void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj) +{ + drm_gem_object_put(obj); } -int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info, +int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info, struct drm_gem_object *_obj, struct i915_vma *vma) { - struct drm_i915_private *i915 = to_i915(display->drm); + struct drm_i915_private *i915 = to_i915(drm); struct drm_i915_gem_object *obj = to_intel_bo(_obj); struct i915_gem_ww_ctx ww; void __iomem *vaddr; @@ -107,7 +89,7 @@ int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info vaddr = i915_vma_pin_iomap(vma); if (IS_ERR(vaddr)) { - drm_err(display->drm, + drm_err(drm, "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); ret = PTR_ERR(vaddr); continue; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h index cb7957272715..fd0b3775dc1f 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h @@ -6,16 +6,18 @@ #ifndef __INTEL_FBDEV_FB_H__ #define __INTEL_FBDEV_FB_H__ -struct drm_fb_helper; -struct drm_fb_helper_surface_size; +#include <linux/types.h> + +struct drm_device; struct drm_gem_object; +struct drm_mode_fb_cmd2; struct fb_info; struct i915_vma; -struct intel_display; -struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes); -int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info, +u32 intel_fbdev_fb_pitch_align(u32 stride); +struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size); +void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj); +int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info, struct drm_gem_object *obj, struct i915_vma *vma); #endif diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 59a36b3a22c1..5bb0090dd5ed 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -9,13 +9,13 @@ #include <drm/drm_print.h> #include "i915_reg.h" -#include "i915_utils.h" #include "intel_atomic.h" #include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_fdi.h" #include "intel_fdi_regs.h" diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c index 6ab2272ab2df..1e9550cb66a3 100644 --- a/drivers/gpu/drm/i915/display/intel_flipq.c +++ b/drivers/gpu/drm/i915/display/intel_flipq.c @@ -7,16 +7,16 @@ #include <drm/drm_print.h> -#include "i915_utils.h" -#include "intel_step.h" #include "intel_crtc.h" #include "intel_de.h" #include 
"intel_display_core.h" #include "intel_display_types.h" -#include "intel_flipq.h" +#include "intel_display_utils.h" #include "intel_dmc.h" #include "intel_dmc_regs.h" #include "intel_dsb.h" +#include "intel_flipq.h" +#include "intel_step.h" #include "intel_vblank.h" #include "intel_vrr.h" @@ -163,10 +163,10 @@ static void intel_flipq_preempt(struct intel_crtc *crtc, bool preempt) PIPEDMC_FQ_CTRL_PREEMPT, preempt ? PIPEDMC_FQ_CTRL_PREEMPT : 0); if (preempt && - intel_de_wait_for_clear(display, - PIPEDMC_FQ_STATUS(crtc->pipe), - PIPEDMC_FQ_STATUS_BUSY, - intel_flipq_preempt_timeout_ms(display))) + intel_de_wait_for_clear_ms(display, + PIPEDMC_FQ_STATUS(crtc->pipe), + PIPEDMC_FQ_STATUS_BUSY, + intel_flipq_preempt_timeout_ms(display))) drm_err(display->drm, "[CRTC:%d:%s] flip queue preempt timeout\n", crtc->base.base.id, crtc->base.name); } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 73ed28ac9573..03c4978fa5ec 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -56,9 +56,8 @@ */ #include <drm/drm_gem.h> +#include <drm/drm_print.h> -#include "i915_active.h" -#include "i915_vma.h" #include "intel_bo.h" #include "intel_display_trace.h" #include "intel_display_types.h" @@ -103,51 +102,6 @@ static void frontbuffer_flush(struct intel_display *display, } /** - * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip - * @display: display device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called after scheduling a flip on @obj. The actual - * frontbuffer flushing will be delayed until completion is signalled with - * intel_frontbuffer_flip_complete. If an invalidate happens in between this - * flush will be cancelled. - * - * Can be called without any locks held. - */ -void intel_frontbuffer_flip_prepare(struct intel_display *display, - unsigned frontbuffer_bits) -{ - spin_lock(&display->fb_tracking.lock); - display->fb_tracking.flip_bits |= frontbuffer_bits; - /* Remove stale busy bits due to the old buffer. */ - display->fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&display->fb_tracking.lock); -} - -/** - * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip - * @display: display device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called after the flip has been latched and will complete - * on the next vblank. It will execute the flush if it hasn't been cancelled yet. - * - * Can be called without any locks held. - */ -void intel_frontbuffer_flip_complete(struct intel_display *display, - unsigned frontbuffer_bits) -{ - spin_lock(&display->fb_tracking.lock); - /* Mask any cancelled flips. 
*/ - frontbuffer_bits &= display->fb_tracking.flip_bits; - display->fb_tracking.flip_bits &= ~frontbuffer_bits; - spin_unlock(&display->fb_tracking.lock); - - if (frontbuffer_bits) - frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP); -} - -/** * intel_frontbuffer_flip - synchronous frontbuffer flip * @display: display device * @frontbuffer_bits: frontbuffer plane tracking bits @@ -173,12 +127,11 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { - struct intel_display *display = to_intel_display(front->obj->dev); + struct intel_display *display = front->display; if (origin == ORIGIN_CS) { spin_lock(&display->fb_tracking.lock); display->fb_tracking.busy_bits |= frontbuffer_bits; - display->fb_tracking.flip_bits &= ~frontbuffer_bits; spin_unlock(&display->fb_tracking.lock); } @@ -194,7 +147,10 @@ void __intel_fb_flush(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { - struct intel_display *display = to_intel_display(front->obj->dev); + struct intel_display *display = front->display; + + if (origin == ORIGIN_DIRTYFB) + intel_bo_frontbuffer_flush_for_display(front); if (origin == ORIGIN_CS) { spin_lock(&display->fb_tracking.lock); @@ -208,12 +164,16 @@ void __intel_fb_flush(struct intel_frontbuffer *front, frontbuffer_flush(display, frontbuffer_bits, origin); } +static void intel_frontbuffer_ref(struct intel_frontbuffer *front) +{ + intel_bo_frontbuffer_ref(front); +} + static void intel_frontbuffer_flush_work(struct work_struct *work) { struct intel_frontbuffer *front = container_of(work, struct intel_frontbuffer, flush_work); - intel_bo_flush_if_display(front->obj); intel_frontbuffer_flush(front, ORIGIN_DIRTYFB); intel_frontbuffer_put(front); } @@ -230,93 +190,31 @@ void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front) if (!front) return; - kref_get(&front->ref); + intel_frontbuffer_ref(front); if (!schedule_work(&front->flush_work)) intel_frontbuffer_put(front); } -static int frontbuffer_active(struct i915_active *ref) +void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm) { - struct intel_frontbuffer *front = - container_of(ref, typeof(*front), write); - - kref_get(&front->ref); - return 0; + front->display = to_intel_display(drm); + atomic_set(&front->bits, 0); + INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work); } -static void frontbuffer_retire(struct i915_active *ref) +void intel_frontbuffer_fini(struct intel_frontbuffer *front) { - struct intel_frontbuffer *front = - container_of(ref, typeof(*front), write); - - intel_frontbuffer_flush(front, ORIGIN_CS); - intel_frontbuffer_put(front); + drm_WARN_ON(front->display->drm, atomic_read(&front->bits)); } -static void frontbuffer_release(struct kref *ref) - __releases(&to_intel_display(front->obj->dev)->fb_tracking.lock) +struct intel_frontbuffer *intel_frontbuffer_get(struct drm_gem_object *obj) { - struct intel_frontbuffer *ret, *front = - container_of(ref, typeof(*front), ref); - struct drm_gem_object *obj = front->obj; - struct intel_display *display = to_intel_display(obj->dev); - - drm_WARN_ON(display->drm, atomic_read(&front->bits)); - - i915_ggtt_clear_scanout(to_intel_bo(obj)); - - ret = intel_bo_set_frontbuffer(obj, NULL); - drm_WARN_ON(display->drm, ret); - spin_unlock(&display->fb_tracking.lock); - - i915_active_fini(&front->write); - - drm_gem_object_put(obj); - kfree_rcu(front, rcu); -} - -struct intel_frontbuffer * -intel_frontbuffer_get(struct 
drm_gem_object *obj) -{ - struct intel_display *display = to_intel_display(obj->dev); - struct intel_frontbuffer *front, *cur; - - front = intel_bo_get_frontbuffer(obj); - if (front) - return front; - - front = kmalloc(sizeof(*front), GFP_KERNEL); - if (!front) - return NULL; - - drm_gem_object_get(obj); - - front->obj = obj; - kref_init(&front->ref); - atomic_set(&front->bits, 0); - i915_active_init(&front->write, - frontbuffer_active, - frontbuffer_retire, - I915_ACTIVE_RETIRE_SLEEPS); - INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work); - - spin_lock(&display->fb_tracking.lock); - cur = intel_bo_set_frontbuffer(obj, front); - spin_unlock(&display->fb_tracking.lock); - - if (cur != front) { - drm_gem_object_put(obj); - kfree(front); - } - - return cur; + return intel_bo_frontbuffer_get(obj); } void intel_frontbuffer_put(struct intel_frontbuffer *front) { - kref_put_lock(&front->ref, - frontbuffer_release, - &to_intel_display(front->obj->dev)->fb_tracking.lock); + intel_bo_frontbuffer_put(front); } /** @@ -345,17 +243,13 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); if (old) { - struct intel_display *display = to_intel_display(old->obj->dev); - - drm_WARN_ON(display->drm, + drm_WARN_ON(old->display->drm, !(atomic_read(&old->bits) & frontbuffer_bits)); atomic_andnot(frontbuffer_bits, &old->bits); } if (new) { - struct intel_display *display = to_intel_display(new->obj->dev); - - drm_WARN_ON(display->drm, + drm_WARN_ON(new->display->drm, atomic_read(&new->bits) & frontbuffer_bits); atomic_or(frontbuffer_bits, &new->bits); } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index 2fee12eaf9b6..22677acb4c06 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -26,10 +26,9 @@ #include <linux/atomic.h> #include <linux/bits.h> -#include <linux/kref.h> - -#include "i915_active_types.h" +#include <linux/workqueue_types.h> +struct drm_device; struct drm_gem_object; struct intel_display; @@ -42,12 +41,8 @@ enum fb_op_origin { }; struct intel_frontbuffer { - struct kref ref; + struct intel_display *display; atomic_t bits; - struct i915_active write; - struct drm_gem_object *obj; - struct rcu_head rcu; - struct work_struct flush_work; }; @@ -68,10 +63,6 @@ struct intel_frontbuffer { GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) -void intel_frontbuffer_flip_prepare(struct intel_display *display, - unsigned frontbuffer_bits); -void intel_frontbuffer_flip_complete(struct intel_display *display, - unsigned frontbuffer_bits); void intel_frontbuffer_flip(struct intel_display *display, unsigned frontbuffer_bits); @@ -144,4 +135,7 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, struct intel_frontbuffer *new, unsigned int frontbuffer_bits); +void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm); +void intel_frontbuffer_fini(struct intel_frontbuffer *front); + #endif /* __INTEL_FRONTBUFFER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 358210adb8f8..795012d7c24c 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -32,6 +32,7 @@ #include <linux/i2c.h> #include <linux/iopoll.h> +#include <drm/drm_print.h> #include <drm/display/drm_hdcp_helper.h> #include "i915_drv.h" @@ 
-448,7 +449,7 @@ gmbus_wait_idle(struct intel_display *display) add_wait_queue(&display->gmbus.wait_queue, &wait); intel_de_write_fw(display, GMBUS4(display), irq_enable); - ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL); + ret = intel_de_wait_fw_ms(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL); intel_de_write_fw(display, GMBUS4(display), 0); remove_wait_queue(&display->gmbus.wait_queue, &wait); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 531ee122bf82..5e1a96223a9c 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -19,9 +19,9 @@ #include <drm/intel/i915_component.h> #include "i915_reg.h" -#include "i915_utils.h" #include "intel_connector.h" #include "intel_de.h" +#include "intel_display_jiffies.h" #include "intel_display_power.h" #include "intel_display_power_well.h" #include "intel_display_regs.h" @@ -410,9 +410,8 @@ static int intel_hdcp_load_keys(struct intel_display *display) } /* Wait for the keys to load (500us) */ - ret = intel_de_wait_custom(display, HDCP_KEY_STATUS, - HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, - 10, 1, &val); + ret = intel_de_wait_ms(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE, + HDCP_KEY_LOAD_DONE, 1, &val); if (ret) return ret; else if (!(val & HDCP_KEY_LOAD_STATUS)) @@ -428,7 +427,7 @@ static int intel_hdcp_load_keys(struct intel_display *display) static int intel_write_sha_text(struct intel_display *display, u32 sha_text) { intel_de_write(display, HDCP_SHA_TEXT, sha_text); - if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { + if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { drm_err(display->drm, "Timed out waiting for SHA1 ready\n"); return -ETIMEDOUT; } @@ -707,8 +706,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, /* Tell the HW we're done with the hash and wait for it to ACK */ intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); - if (intel_de_wait_for_set(display, HDCP_REP_CTL, - HDCP_SHA1_COMPLETE, 1)) { + if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL, + HDCP_SHA1_COMPLETE, 1)) { drm_err(display->drm, "Timed out waiting for SHA1 complete\n"); return -ETIMEDOUT; } @@ -856,9 +855,9 @@ static int intel_hdcp_auth(struct intel_connector *connector) HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ - if (intel_de_wait_for_set(display, - HDCP_STATUS(display, cpu_transcoder, port), - HDCP_STATUS_AN_READY, 1)) { + if (intel_de_wait_for_set_ms(display, + HDCP_STATUS(display, cpu_transcoder, port), + HDCP_STATUS_AN_READY, 1)) { drm_err(display->drm, "Timed out waiting for An\n"); return -ETIMEDOUT; } @@ -953,10 +952,10 @@ static int intel_hdcp_auth(struct intel_connector *connector) } /* Wait for encryption confirmation */ - if (intel_de_wait_for_set(display, - HDCP_STATUS(display, cpu_transcoder, port), - HDCP_STATUS_ENC, - HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { + if (intel_de_wait_for_set_ms(display, + HDCP_STATUS(display, cpu_transcoder, port), + HDCP_STATUS_ENC, + HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { drm_err(display->drm, "Timed out waiting for encryption\n"); return -ETIMEDOUT; } @@ -1013,9 +1012,9 @@ static int _intel_hdcp_disable(struct intel_connector *connector) hdcp->hdcp_encrypted = false; intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0); - if (intel_de_wait_for_clear(display, - HDCP_STATUS(display, cpu_transcoder, port), - ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { + if 
(intel_de_wait_for_clear_ms(display, + HDCP_STATUS(display, cpu_transcoder, port), + ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { drm_err(display->drm, "Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; @@ -1940,11 +1939,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port), 0, CTL_LINK_ENCRYPTION_REQ); - ret = intel_de_wait_for_set(display, - HDCP2_STATUS(display, cpu_transcoder, - port), - LINK_ENCRYPTION_STATUS, - HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); + ret = intel_de_wait_for_set_ms(display, + HDCP2_STATUS(display, cpu_transcoder, port), + LINK_ENCRYPTION_STATUS, + HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); dig_port->hdcp.auth_status = true; return ret; @@ -1966,11 +1964,10 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port), CTL_LINK_ENCRYPTION_REQ, 0); - ret = intel_de_wait_for_clear(display, - HDCP2_STATUS(display, cpu_transcoder, - port), - LINK_ENCRYPTION_STATUS, - HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); + ret = intel_de_wait_for_clear_ms(display, + HDCP2_STATUS(display, cpu_transcoder, port), + LINK_ENCRYPTION_STATUS, + HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); if (ret == -ETIMEDOUT) drm_dbg_kms(display->drm, "Disable Encryption Timedout"); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c index 6a22862d6be1..3e7b480ee9f1 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -3,13 +3,13 @@ * Copyright 2023, Intel Corporation. */ +#include <drm/drm_print.h> #include <drm/intel/i915_hdcp_interface.h> #include "gem/i915_gem_region.h" #include "gt/intel_gt.h" #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" #include "i915_drv.h" -#include "i915_utils.h" #include "intel_hdcp_gsc.h" struct intel_hdcp_gsc_context { diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 4ab7e2e3bfd4..908faf17f93d 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -45,7 +45,6 @@ #include <media/cec-notifier.h> #include "g4x_hdmi.h" -#include "i915_utils.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" @@ -55,6 +54,7 @@ #include "intel_display_driver.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_gmbus.h" #include "intel_hdcp.h" @@ -68,6 +68,20 @@ #include "intel_snps_phy.h" #include "intel_vrr.h" +bool intel_hdmi_is_frl(u32 clock) +{ + switch (clock) { + case 300000: /* 3 Gbps */ + case 600000: /* 6 Gbps */ + case 800000: /* 8 Gbps */ + case 1000000: /* 10 Gbps */ + case 1200000: /* 12 Gbps */ + return true; + default: + return false; + } +} + static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { @@ -1584,8 +1598,8 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port, intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - ret = intel_de_wait_for_set(display, HDCP_STATUS(display, cpu_transcoder, port), - HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC, 1); + ret = intel_de_wait_for_set_ms(display, HDCP_STATUS(display, cpu_transcoder, port), + HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC, 1); if (ret) { drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n", intel_de_read(display, 
HDCP_STATUS(display, cpu_transcoder, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index dec2ad7dd8a2..be2fad57e4ad 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -60,6 +60,7 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state, int src_max_slices, int src_max_slice_width, int hdmi_max_slices, int hdmi_throughput); int intel_hdmi_dsc_get_slice_height(int vactive); +bool intel_hdmi_is_frl(u32 clock); void hsw_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 4451a792600a..235706229ffb 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -24,16 +24,17 @@ #include <linux/debugfs.h> #include <linux/kernel.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_irq.h" -#include "i915_utils.h" #include "intel_connector.h" -#include "intel_display_power.h" #include "intel_display_core.h" +#include "intel_display_power.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_hdcp.h" #include "intel_hotplug.h" diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 4f72f3fb9af5..46c47b3d6f42 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -6,11 +6,11 @@ #include <drm/drm_print.h> #include "i915_reg.h" -#include "i915_utils.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp_aux.h" #include "intel_gmbus.h" #include "intel_hotplug.h" @@ -420,6 +420,9 @@ u32 i9xx_hpd_irq_ack(struct intel_display *display) u32 hotplug_status = 0, hotplug_status_mask; int i; + if (!HAS_HOTPLUG(display)) + return 0; + if (display->platform.g4x || display->platform.valleyview || display->platform.cherryview) hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c index f52dee0ea412..d2862de894fa 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.c +++ b/drivers/gpu/drm/i915/display/intel_link_bw.c @@ -20,6 +20,7 @@ #include "intel_dp_tunnel.h" #include "intel_fdi.h" #include "intel_link_bw.h" +#include "intel_vdsc.h" static int get_forced_link_bpp_x16(struct intel_atomic_state *state, const struct intel_crtc *crtc) @@ -55,7 +56,7 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state, struct intel_display *display = to_intel_display(state); enum pipe pipe; - limits->force_fec_pipes = 0; + limits->link_dsc_pipes = 0; limits->bpp_limit_reached_pipes = 0; for_each_pipe(display, pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); @@ -65,8 +66,8 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state, if (state->base.duplicated && crtc_state) { limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16; - if (crtc_state->fec_enable) - limits->force_fec_pipes |= BIT(pipe); + if (intel_dsc_enabled_on_link(crtc_state)) + limits->link_dsc_pipes |= BIT(pipe); } else { limits->max_bpp_x16[pipe] = INT_MAX; } @@ -265,10 +266,10 @@ assert_link_limit_change_valid(struct 
intel_display *display, bool bpps_changed = false; enum pipe pipe; - /* FEC can't be forced off after it was forced on. */ + /* DSC can't be disabled after it was enabled. */ if (drm_WARN_ON(display->drm, - (old_limits->force_fec_pipes & new_limits->force_fec_pipes) != - old_limits->force_fec_pipes)) + (old_limits->link_dsc_pipes & new_limits->link_dsc_pipes) != + old_limits->link_dsc_pipes)) return false; for_each_pipe(display, pipe) { @@ -286,8 +287,8 @@ assert_link_limit_change_valid(struct intel_display *display, /* At least one limit must change. */ if (drm_WARN_ON(display->drm, !bpps_changed && - new_limits->force_fec_pipes == - old_limits->force_fec_pipes)) + new_limits->link_dsc_pipes == + old_limits->link_dsc_pipes)) return false; return true; diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h index 95ab7c50c61d..cb18e171037c 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.h +++ b/drivers/gpu/drm/i915/display/intel_link_bw.h @@ -15,7 +15,7 @@ struct intel_connector; struct intel_crtc_state; struct intel_link_bw_limits { - u8 force_fec_pipes; + u8 link_dsc_pipes; u8 bpp_limit_reached_pipes; /* in 1/16 bpp units */ int max_bpp_x16[I915_MAX_PIPES]; diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index d56026c4efdd..9ceabbc981a1 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -31,10 +31,10 @@ #include <drm/drm_edid.h> #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_hdmi.h" #include "intel_lspcon.h" diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.c b/drivers/gpu/drm/i915/display/intel_lt_phy.c new file mode 100644 index 000000000000..a67eb4f7f897 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_lt_phy.c @@ -0,0 +1,2327 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <drm/drm_print.h> + +#include "i915_reg.h" +#include "intel_cx0_phy.h" +#include "intel_cx0_phy_regs.h" +#include "intel_ddi.h" +#include "intel_ddi_buf_trans.h" +#include "intel_de.h" +#include "intel_display.h" +#include "intel_display_types.h" +#include "intel_display_utils.h" +#include "intel_dpll_mgr.h" +#include "intel_hdmi.h" +#include "intel_lt_phy.h" +#include "intel_lt_phy_regs.h" +#include "intel_panel.h" +#include "intel_psr.h" +#include "intel_tc.h" + +#define for_each_lt_phy_lane_in_mask(__lane_mask, __lane) \ + for ((__lane) = 0; (__lane) < 2; (__lane)++) \ + for_each_if((__lane_mask) & BIT(__lane)) + +#define INTEL_LT_PHY_LANE0 BIT(0) +#define INTEL_LT_PHY_LANE1 BIT(1) +#define INTEL_LT_PHY_BOTH_LANES (INTEL_LT_PHY_LANE1 |\ + INTEL_LT_PHY_LANE0) +#define MODE_DP 3 +#define Q32_TO_INT(x) ((x) >> 32) +#define Q32_TO_FRAC(x) ((x) & 0xFFFFFFFF) +#define DCO_MIN_FREQ_MHZ 11850 +#define REF_CLK_KHZ 38400 +#define TDC_RES_MULTIPLIER 10000000ULL + +struct phy_param_t { + u32 val; + u32 addr; +}; + +struct lt_phy_params { + struct phy_param_t pll_reg4; + struct phy_param_t pll_reg3; + struct phy_param_t pll_reg5; + struct phy_param_t pll_reg57; + struct phy_param_t lf; + struct phy_param_t tdc; + struct phy_param_t ssc; + struct phy_param_t bias2; + struct phy_param_t bias_trim; + struct phy_param_t dco_med; + struct phy_param_t dco_fine; + struct phy_param_t ssc_inj; + struct phy_param_t surv_bonus; +}; + +static const 
struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = { + .clock = 162000, + .config = { + 0x83, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x5, 0xa, 0x2a, 0x20 }, + { 0x80, 0x0, 0x0, 0x0 }, + { 0x4, 0x4, 0x82, 0x28 }, + { 0xfa, 0x16, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x5, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x4b, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x5b, 0xe0, 0x0a }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = { + .clock = 270000, + .config = { + 0x8b, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x3, 0xca, 0x34, 0xa0 }, + { 0xe0, 0x0, 0x0, 0x0 }, + { 0x5, 0x4, 0x81, 0xad }, + { 0xfa, 0x11, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x7, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x43, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x5b, 0xe0, 0x0d }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = { + .clock = 540000, + .config = { + 0x93, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x1, 0x4d, 0x34, 0xa0 }, + { 0xe0, 0x0, 0x0, 0x0 }, + { 0xa, 0x4, 0x81, 0xda }, + { 0xfa, 0x11, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x7, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x43, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x5b, 0xe0, 0x0d }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = { + .clock = 810000, + .config = { + 0x9b, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x1, 0x4a, 0x34, 0xa0 }, + { 0xe0, 0x0, 0x0, 0x0 }, + { 0x5, 0x4, 0x80, 0xa8 }, + { 0xfa, 0x11, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x7, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x43, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x5b, 0xe0, 0x0d }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = { + .clock = 1000000, + .config = { + 0x43, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x85, + 0x85, + 0x85, + 0x85, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x1, 0xa, 0x20, 0x80 }, + { 0x6a, 0xaa, 0xaa, 0xab }, + { 0x0, 0x3, 0x4, 0x94 
}, + { 0xfa, 0x1c, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x4, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x45, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x14, 0x2a, 0x14 }, + { 0x0, 0x5b, 0xe0, 0x8 }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = { + .clock = 1350000, + .config = { + 0xcb, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x2, 0x9, 0x2b, 0xe0 }, + { 0x90, 0x0, 0x0, 0x0 }, + { 0x8, 0x4, 0x80, 0xe0 }, + { 0xfa, 0x15, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x6, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x49, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x57, 0xe0, 0x0c }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = { + .clock = 2000000, + .config = { + 0x53, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x85, + 0x85, + 0x85, + 0x85, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + 0x86, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x1, 0xa, 0x20, 0x80 }, + { 0x6a, 0xaa, 0xaa, 0xab }, + { 0x0, 0x3, 0x4, 0x94 }, + { 0xfa, 0x1c, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x4, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x45, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x14, 0x2a, 0x14 }, + { 0x0, 0x5b, 0xe0, 0x8 }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = { + &xe3plpd_lt_dp_rbr, + &xe3plpd_lt_dp_hbr1, + &xe3plpd_lt_dp_hbr2, + &xe3plpd_lt_dp_hbr3, + &xe3plpd_lt_dp_uhbr10, + &xe3plpd_lt_dp_uhbr13_5, + &xe3plpd_lt_dp_uhbr20, + NULL, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = { + .clock = 216000, + .config = { + 0xa3, + 0x2d, + 0x1, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x3, 0xca, 0x2a, 0x20 }, + { 0x80, 0x0, 0x0, 0x0 }, + { 0x6, 0x4, 0x81, 0xbc }, + { 0xfa, 0x16, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x5, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x4b, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x5b, 0xe0, 0x0a }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = { + .clock = 243000, + .config = { + 0xab, + 0x2d, + 0x1, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x3, 0xca, 0x2f, 0x60 }, + { 0xb0, 0x0, 0x0, 0x0 }, + { 0x6, 0x4, 0x81, 0xbc }, + { 0xfa, 0x13, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x6, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x47, 0x48, 0x0, 0x0 }, + { 0x0, 0x0, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x5b, 
0xe0, 0x0c }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = { + .clock = 324000, + .config = { + 0xb3, + 0x2d, + 0x1, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x2, 0x8a, 0x2a, 0x20 }, + { 0x80, 0x0, 0x0, 0x0 }, + { 0x6, 0x4, 0x81, 0x28 }, + { 0xfa, 0x16, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x5, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x4b, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x5b, 0xe0, 0x0a }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = { + .clock = 432000, + .config = { + 0xbb, + 0x2d, + 0x1, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x1, 0x4d, 0x2a, 0x20 }, + { 0x80, 0x0, 0x0, 0x0 }, + { 0xc, 0x4, 0x81, 0xbc }, + { 0xfa, 0x16, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x5, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x4b, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x5b, 0xe0, 0x0a }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = { + .clock = 675000, + .config = { + 0xdb, + 0x2d, + 0x1, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x1, 0x4a, 0x2b, 0xe0 }, + { 0x90, 0x0, 0x0, 0x0 }, + { 0x6, 0x4, 0x80, 0xa8 }, + { 0xfa, 0x15, 0x83, 0x11 }, + { 0x80, 0x0f, 0xf9, 0x53 }, + { 0x84, 0x26, 0x6, 0x4 }, + { 0x0, 0xe0, 0x1, 0x0 }, + { 0x49, 0x48, 0x0, 0x0 }, + { 0x27, 0x8, 0x0, 0x0 }, + { 0x5a, 0x13, 0x29, 0x13 }, + { 0x0, 0x57, 0xe0, 0x0c }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = { + &xe3plpd_lt_dp_rbr, + &xe3plpd_lt_edp_2_16, + &xe3plpd_lt_edp_2_43, + &xe3plpd_lt_dp_hbr1, + &xe3plpd_lt_edp_3_24, + &xe3plpd_lt_edp_4_32, + &xe3plpd_lt_dp_hbr2, + &xe3plpd_lt_edp_6_75, + &xe3plpd_lt_dp_hbr3, + NULL, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = { + .clock = 25200, + .config = { + 0x84, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x0c, 0x15, 0x27, 0x60 }, + { 0x0, 0x0, 0x0, 0x0 }, + { 0x8, 0x4, 0x98, 0x28 }, + { 0x42, 0x0, 0x84, 0x10 }, + { 0x80, 0x0f, 0xd9, 0xb5 }, + { 0x86, 0x0, 0x0, 0x0 }, + { 0x1, 0xa0, 0x1, 0x0 }, + { 0x4b, 0x0, 0x0, 0x0 }, + { 0x28, 0x0, 0x0, 0x0 }, + { 0x0, 0x14, 0x2a, 0x14 }, + { 0x0, 0x0, 0x0, 0x0 }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = { + .clock = 27200, + .config = { + 0x84, + 0x2d, + 0x0, + }, + 
.addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x0b, 0x15, 0x26, 0xa0 }, + { 0x60, 0x0, 0x0, 0x0 }, + { 0x8, 0x4, 0x96, 0x28 }, + { 0xfa, 0x0c, 0x84, 0x11 }, + { 0x80, 0x0f, 0xd9, 0x53 }, + { 0x86, 0x0, 0x0, 0x0 }, + { 0x1, 0xa0, 0x1, 0x0 }, + { 0x4b, 0x0, 0x0, 0x0 }, + { 0x28, 0x0, 0x0, 0x0 }, + { 0x0, 0x14, 0x2a, 0x14 }, + { 0x0, 0x0, 0x0, 0x0 }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = { + .clock = 74250, + .config = { + 0x84, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x4, 0x15, 0x26, 0xa0 }, + { 0x60, 0x0, 0x0, 0x0 }, + { 0x8, 0x4, 0x88, 0x28 }, + { 0xfa, 0x0c, 0x84, 0x11 }, + { 0x80, 0x0f, 0xd9, 0x53 }, + { 0x86, 0x0, 0x0, 0x0 }, + { 0x1, 0xa0, 0x1, 0x0 }, + { 0x4b, 0x0, 0x0, 0x0 }, + { 0x28, 0x0, 0x0, 0x0 }, + { 0x0, 0x14, 0x2a, 0x14 }, + { 0x0, 0x0, 0x0, 0x0 }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = { + .clock = 148500, + .config = { + 0x84, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x2, 0x15, 0x26, 0xa0 }, + { 0x60, 0x0, 0x0, 0x0 }, + { 0x8, 0x4, 0x84, 0x28 }, + { 0xfa, 0x0c, 0x84, 0x11 }, + { 0x80, 0x0f, 0xd9, 0x53 }, + { 0x86, 0x0, 0x0, 0x0 }, + { 0x1, 0xa0, 0x1, 0x0 }, + { 0x4b, 0x0, 0x0, 0x0 }, + { 0x28, 0x0, 0x0, 0x0 }, + { 0x0, 0x14, 0x2a, 0x14 }, + { 0x0, 0x0, 0x0, 0x0 }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = { + .clock = 594000, + .config = { + 0x84, + 0x2d, + 0x0, + }, + .addr_msb = { + 0x87, + 0x87, + 0x87, + 0x87, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + 0x88, + }, + .addr_lsb = { + 0x10, + 0x0c, + 0x14, + 0xe4, + 0x0c, + 0x10, + 0x14, + 0x18, + 0x48, + 0x40, + 0x4c, + 0x24, + 0x44, + }, + .data = { + { 0x0, 0x4c, 0x2, 0x0 }, + { 0x0, 0x95, 0x26, 0xa0 }, + { 0x60, 0x0, 0x0, 0x0 }, + { 0x8, 0x4, 0x81, 0x28 }, + { 0xfa, 0x0c, 0x84, 0x11 }, + { 0x80, 0x0f, 0xd9, 0x53 }, + { 0x86, 0x0, 0x0, 0x0 }, + { 0x1, 0xa0, 0x1, 0x0 }, + { 0x4b, 0x0, 0x0, 0x0 }, + { 0x28, 0x0, 0x0, 0x0 }, + { 0x0, 0x14, 0x2a, 0x14 }, + { 0x0, 0x0, 0x0, 0x0 }, + { 0x0, 0x0, 0x0, 0x0 }, + }, +}; + +static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = { + &xe3plpd_lt_hdmi_252, + &xe3plpd_lt_hdmi_272, + &xe3plpd_lt_hdmi_742p5, + &xe3plpd_lt_hdmi_1p485, + &xe3plpd_lt_hdmi_5p94, + NULL, +}; + +static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (!intel_tc_port_in_dp_alt_mode(dig_port)) + return INTEL_LT_PHY_BOTH_LANES; + + return intel_tc_port_max_lane_count(dig_port) > 2 + ? 
INTEL_LT_PHY_BOTH_LANES : INTEL_LT_PHY_LANE0;
+}
+
+static u8 intel_lt_phy_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
+{
+ return intel_cx0_read(encoder, lane_mask, addr);
+}
+
+static void intel_lt_phy_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
+{
+ intel_cx0_write(encoder, lane_mask, addr, data, committed);
+}
+
+static void intel_lt_phy_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+{
+ intel_cx0_rmw(encoder, lane_mask, addr, clear, set, committed);
+}
+
+static void intel_lt_phy_clear_status_p2p(struct intel_encoder *encoder,
+ int lane)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ intel_de_rmw(display,
+ XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(encoder->port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY, 0);
+}
+
+static void
+assert_dc_off(struct intel_display *display)
+{
+ bool enabled;
+
+ enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
+ drm_WARN_ON(display->drm, !enabled);
+}
+
+static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
+ int lane, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+ int ack;
+ u32 val;
+
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
+ drm_dbg_kms(display->drm,
+ "PHY %c Timeout waiting for previous transaction to complete. Resetting bus.\n",
+ phy_name(phy));
+ intel_cx0_bus_reset(encoder, lane);
+ return -ETIMEDOUT;
+ }
+
+ intel_de_rmw(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), 0, 0);
+
+ intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING |
+ XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |
+ XELPDP_PORT_M2P_DATA(data) |
+ XELPDP_PORT_M2P_ADDRESS(addr));
+
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+ if (ack < 0)
+ return ack;
+
+ if (val & XELPDP_PORT_P2M_ERROR_SET) {
+ drm_dbg_kms(display->drm,
+ "PHY %c Error occurred during P2P write command. Status: 0x%x\n",
+ phy_name(phy), val);
+ intel_lt_phy_clear_status_p2p(encoder, lane);
+ intel_cx0_bus_reset(encoder, lane);
+ return -EINVAL;
+ }
+
+ /*
+ * RE-VISIT:
+ * This delay is needed to give the PHY time to set everything up; it was
+ * a requirement to get the display up and running. It is the time the PHY
+ * takes to settle down after it has been programmed.
+ */ + udelay(150); + intel_clear_response_ready_flag(encoder, lane); + intel_lt_phy_clear_status_p2p(encoder, lane); + + return 0; +} + +static void __intel_lt_phy_p2p_write(struct intel_encoder *encoder, + int lane, u16 addr, u8 data, + i915_reg_t mac_reg_addr, + u8 expected_mac_val) +{ + struct intel_display *display = to_intel_display(encoder); + enum phy phy = intel_encoder_to_phy(encoder); + int i, status; + + assert_dc_off(display); + + /* 3 tries is assumed to be enough to write successfully */ + for (i = 0; i < 3; i++) { + status = __intel_lt_phy_p2p_write_once(encoder, lane, addr, data, mac_reg_addr, + expected_mac_val); + + if (status == 0) + return; + } + + drm_err_once(display->drm, + "PHY %c P2P Write %04x failed after %d retries.\n", phy_name(phy), addr, i); +} + +static void intel_lt_phy_p2p_write(struct intel_encoder *encoder, + u8 lane_mask, u16 addr, u8 data, + i915_reg_t mac_reg_addr, + u8 expected_mac_val) +{ + int lane; + + for_each_lt_phy_lane_in_mask(lane_mask, lane) + __intel_lt_phy_p2p_write(encoder, lane, addr, data, mac_reg_addr, expected_mac_val); +} + +static void +intel_lt_phy_setup_powerdown(struct intel_encoder *encoder, u8 lane_count) +{ + /* + * The new PORT_BUF_CTL6 stuff for dc5 entry and exit needs to be handled + * by dmc firmware not explicitly mentioned in Bspec. This leaves this + * function as a wrapper only but keeping it expecting future changes. + */ + intel_cx0_setup_powerdown(encoder); +} + +static void +intel_lt_phy_powerdown_change_sequence(struct intel_encoder *encoder, + u8 lane_mask, u8 state) +{ + intel_cx0_powerdown_change_sequence(encoder, lane_mask, state); +} + +static void +intel_lt_phy_lane_reset(struct intel_encoder *encoder, + u8 lane_count) +{ + struct intel_display *display = to_intel_display(encoder); + enum port port = encoder->port; + enum phy phy = intel_encoder_to_phy(encoder); + u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); + u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES + ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1) + : XELPDP_LANE_PIPE_RESET(0); + u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES + ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) | + XELPDP_LANE_PHY_CURRENT_STATUS(1)) + : XELPDP_LANE_PHY_CURRENT_STATUS(0); + u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES + ? 
(XE3PLPDP_LANE_PHY_PULSE_STATUS(0) | + XE3PLPDP_LANE_PHY_PULSE_STATUS(1)) + : XE3PLPDP_LANE_PHY_PULSE_STATUS(0); + + intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port), + XE3PLPD_MACCLK_RATE_MASK, XE3PLPD_MACCLK_RATE_DEF); + + intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port), + XE3PLPDP_PHY_MODE_MASK, XE3PLPDP_PHY_MODE_DP); + + intel_lt_phy_setup_powerdown(encoder, lane_count); + intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask, + XELPDP_P2_STATE_RESET); + + intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port), + XE3PLPD_MACCLK_RESET_0, 0); + + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_LANE_PCLK_PLL_REQUEST(0), + XELPDP_LANE_PCLK_PLL_REQUEST(0)); + + if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_LANE_PCLK_PLL_ACK(0), + XE3PLPD_MACCLK_TURNON_LATENCY_MS)) + drm_warn(display->drm, "PHY %c PLL MacCLK assertion ack not done\n", + phy_name(phy)); + + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_FORWARD_CLOCK_UNGATE, + XELPDP_FORWARD_CLOCK_UNGATE); + + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_pipe_reset | lane_phy_pulse_status, 0); + + if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_current_status, + XE3PLPD_RESET_END_LATENCY_MS)) + drm_warn(display->drm, "PHY %c failed to bring out of lane reset\n", + phy_name(phy)); + + if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_pulse_status, + XE3PLPD_RATE_CALIB_DONE_LATENCY_MS)) + drm_warn(display->drm, "PHY %c PLL rate not changed\n", + phy_name(phy)); + + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_pulse_status, 0); +} + +static void +intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool lane_reversal) +{ + struct intel_display *display = to_intel_display(encoder); + u32 val = 0; + + intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port), + XELPDP_PORT_REVERSAL, + lane_reversal ? XELPDP_PORT_REVERSAL : 0); + + val |= XELPDP_FORWARD_CLOCK_UNGATE; + + /* + * We actually mean MACCLK here and not MAXPCLK when using LT Phy + * but since the register bits still remain the same we use + * the same definition + */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && + intel_hdmi_is_frl(crtc_state->port_clock)) + val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK); + else + val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK); + + /* DP2.0 10G and 20G rates enable MPLLA*/ + if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000) + val |= XELPDP_SSC_ENABLE_PLLA; + else + val |= crtc_state->dpll_hw_state.ltpll.ssc_enabled ? 
XELPDP_SSC_ENABLE_PLLB : 0;
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_SSC_ENABLE_PLLB, val);
+}
+
+static u32 intel_lt_phy_get_dp_clock(u8 rate)
+{
+ switch (rate) {
+ case 0:
+ return 162000;
+ case 1:
+ return 270000;
+ case 2:
+ return 540000;
+ case 3:
+ return 810000;
+ case 4:
+ return 216000;
+ case 5:
+ return 243000;
+ case 6:
+ return 324000;
+ case 7:
+ return 432000;
+ case 8:
+ return 1000000;
+ case 9:
+ return 1350000;
+ case 10:
+ return 2000000;
+ case 11:
+ return 675000;
+ default:
+ MISSING_CASE(rate);
+ return 0;
+ }
+}
+
+static bool
+intel_lt_phy_config_changed(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val, rate;
+ u32 clock;
+
+ val = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_0_CONFIG);
+ rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, val);
+
+ /*
+ * The only time we do not reconfigure the PLL is when we are
+ * using the 1.62 Gbps clock, since the PHY PLL defaults to that;
+ * otherwise we always need to reconfigure it.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ clock = intel_lt_phy_get_dp_clock(rate);
+ if (crtc_state->port_clock == 162000 && crtc_state->port_clock == clock)
+ return false;
+ }
+
+ return true;
+}
+
+static intel_wakeref_t intel_lt_phy_transaction_begin(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_wakeref_t wakeref;
+
+ intel_psr_pause(intel_dp);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
+
+ return wakeref;
+}
+
+static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_resume(intel_dp);
+ intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
+}
+
+static const struct intel_lt_phy_pll_state * const *
+intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return xe3plpd_lt_edp_tables;
+
+ return xe3plpd_lt_dp_tables;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return xe3plpd_lt_hdmi_tables;
+ }
+
+ MISSING_CASE(encoder->type);
+ return NULL;
+}
+
+static bool
+intel_lt_phy_pll_is_ssc_enabled(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_panel_use_ssc(display)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ return (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+ }
+ }
+
+ return false;
+}
+
+static u64 mul_q32_u32(u64 a_q32, u32 b)
+{
+ u64 p0, p1, carry, result;
+ u64 x_hi = a_q32 >> 32;
+ u64 x_lo = a_q32 & 0xFFFFFFFFULL;
+
+ p0 = x_lo * (u64)b;
+ p1 = x_hi * (u64)b;
+ carry = p0 >> 32;
+ result = (p1 << 32) + (carry << 32) + (p0 & 0xFFFFFFFFULL);
+
+ return result;
+}
+
+static bool
+calculate_target_dco_and_loop_cnt(u32 frequency_khz, u64 *target_dco_mhz, u32 *loop_cnt)
+{
+ u32 ppm_value = 1;
+ u32 dco_min_freq = DCO_MIN_FREQ_MHZ;
+ u32 dco_max_freq = 16200;
+ u32 dco_min_freq_low = 10000;
+ u32 dco_max_freq_low = 12000;
+ u64 val = 0;
+ u64
refclk_khz = REF_CLK_KHZ; + u64 m2div = 0; + u64 val_with_frac = 0; + u64 ppm = 0; + u64 temp0 = 0, temp1, scale; + int ppm_cnt, dco_count, y; + + for (ppm_cnt = 0; ppm_cnt < 5; ppm_cnt++) { + ppm_value = ppm_cnt == 2 ? 2 : 1; + for (dco_count = 0; dco_count < 2; dco_count++) { + if (dco_count == 1) { + dco_min_freq = dco_min_freq_low; + dco_max_freq = dco_max_freq_low; + } + for (y = 2; y <= 255; y += 2) { + val = div64_u64((u64)y * frequency_khz, 200); + m2div = div64_u64(((u64)(val) << 32), refclk_khz); + m2div = mul_q32_u32(m2div, 500); + val_with_frac = mul_q32_u32(m2div, refclk_khz); + val_with_frac = div64_u64(val_with_frac, 500); + temp1 = Q32_TO_INT(val_with_frac); + temp0 = (temp1 > val) ? (temp1 - val) : + (val - temp1); + ppm = div64_u64(temp0, val); + if (temp1 >= dco_min_freq && + temp1 <= dco_max_freq && + ppm < ppm_value) { + /* Round to two places */ + scale = (1ULL << 32) / 100; + temp0 = DIV_ROUND_UP_ULL(val_with_frac, + scale); + *target_dco_mhz = temp0 * scale; + *loop_cnt = y; + return true; + } + } + } + } + + return false; +} + +static void set_phy_vdr_addresses(struct lt_phy_params *p, int pll_type) +{ + p->pll_reg4.addr = PLL_REG_ADDR(PLL_REG4_ADDR, pll_type); + p->pll_reg3.addr = PLL_REG_ADDR(PLL_REG3_ADDR, pll_type); + p->pll_reg5.addr = PLL_REG_ADDR(PLL_REG5_ADDR, pll_type); + p->pll_reg57.addr = PLL_REG_ADDR(PLL_REG57_ADDR, pll_type); + p->lf.addr = PLL_REG_ADDR(PLL_LF_ADDR, pll_type); + p->tdc.addr = PLL_REG_ADDR(PLL_TDC_ADDR, pll_type); + p->ssc.addr = PLL_REG_ADDR(PLL_SSC_ADDR, pll_type); + p->bias2.addr = PLL_REG_ADDR(PLL_BIAS2_ADDR, pll_type); + p->bias_trim.addr = PLL_REG_ADDR(PLL_BIAS_TRIM_ADDR, pll_type); + p->dco_med.addr = PLL_REG_ADDR(PLL_DCO_MED_ADDR, pll_type); + p->dco_fine.addr = PLL_REG_ADDR(PLL_DCO_FINE_ADDR, pll_type); + p->ssc_inj.addr = PLL_REG_ADDR(PLL_SSC_INJ_ADDR, pll_type); + p->surv_bonus.addr = PLL_REG_ADDR(PLL_SURV_BONUS_ADDR, pll_type); +} + +static void compute_ssc(struct lt_phy_params *p, u32 ana_cfg) +{ + int ssc_stepsize = 0; + int ssc_steplen = 0; + int ssc_steplog = 0; + + p->ssc.val = (1 << 31) | (ana_cfg << 24) | (ssc_steplog << 16) | + (ssc_stepsize << 8) | ssc_steplen; +} + +static void compute_bias2(struct lt_phy_params *p) +{ + u32 ssc_en_local = 0; + u64 dynctrl_ovrd_en = 0; + + p->bias2.val = (dynctrl_ovrd_en << 31) | (ssc_en_local << 30) | + (1 << 23) | (1 << 24) | (32 << 16) | (1 << 8); +} + +static void compute_tdc(struct lt_phy_params *p, u64 tdc_fine) +{ + u32 settling_time = 15; + u32 bias_ovr_en = 1; + u32 coldstart = 1; + u32 true_lock = 2; + u32 early_lock = 1; + u32 lock_ovr_en = 1; + u32 lock_thr = tdc_fine ? 3 : 5; + u32 unlock_thr = tdc_fine ? 5 : 11; + + p->tdc.val = (u32)((2 << 30) + (settling_time << 16) + (bias_ovr_en << 15) + + (lock_ovr_en << 14) + (coldstart << 12) + (true_lock << 10) + + (early_lock << 8) + (unlock_thr << 4) + lock_thr); +} + +static void compute_dco_med(struct lt_phy_params *p) +{ + u32 cselmed_en = 0; + u32 cselmed_dyn_adj = 0; + u32 cselmed_ratio = 39; + u32 cselmed_thr = 8; + + p->dco_med.val = (cselmed_en << 31) + (cselmed_dyn_adj << 30) + + (cselmed_ratio << 24) + (cselmed_thr << 21); +} + +static void compute_dco_fine(struct lt_phy_params *p, u32 dco_12g) +{ + u32 dco_fine0_tune_2_0 = 0; + u32 dco_fine1_tune_2_0 = 0; + u32 dco_fine2_tune_2_0 = 0; + u32 dco_fine3_tune_2_0 = 0; + u32 dco_dith0_tune_2_0 = 0; + u32 dco_dith1_tune_2_0 = 0; + + dco_fine0_tune_2_0 = dco_12g ? 4 : 3; + dco_fine1_tune_2_0 = 2; + dco_fine2_tune_2_0 = dco_12g ? 
2 : 1; + dco_fine3_tune_2_0 = 5; + dco_dith0_tune_2_0 = dco_12g ? 4 : 3; + dco_dith1_tune_2_0 = 2; + + p->dco_fine.val = (dco_dith1_tune_2_0 << 19) + + (dco_dith0_tune_2_0 << 16) + + (dco_fine3_tune_2_0 << 11) + + (dco_fine2_tune_2_0 << 8) + + (dco_fine1_tune_2_0 << 3) + + dco_fine0_tune_2_0; +} + +int +intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state, + u32 frequency_khz) +{ +#define DATA_ASSIGN(i, pll_reg) \ + do { \ + lt_state->data[i][0] = (u8)((((pll_reg).val) & 0xFF000000) >> 24); \ + lt_state->data[i][1] = (u8)((((pll_reg).val) & 0x00FF0000) >> 16); \ + lt_state->data[i][2] = (u8)((((pll_reg).val) & 0x0000FF00) >> 8); \ + lt_state->data[i][3] = (u8)((((pll_reg).val) & 0x000000FF)); \ + } while (0) +#define ADDR_ASSIGN(i, pll_reg) \ + do { \ + lt_state->addr_msb[i] = ((pll_reg).addr >> 8) & 0xFF; \ + lt_state->addr_lsb[i] = (pll_reg).addr & 0xFF; \ + } while (0) + + bool found = false; + struct lt_phy_params p; + u32 dco_fmin = DCO_MIN_FREQ_MHZ; + u64 refclk_khz = REF_CLK_KHZ; + u32 refclk_mhz_int = REF_CLK_KHZ / 1000; + u64 m2div = 0; + u64 target_dco_mhz = 0; + u64 tdc_fine, tdc_targetcnt; + u64 feedfwd_gain ,feedfwd_cal_en; + u64 tdc_res = 30; + u32 prop_coeff; + u32 int_coeff; + u32 ndiv = 1; + u32 m1div = 1, m2div_int, m2div_frac; + u32 frac_en; + u32 ana_cfg; + u32 loop_cnt = 0; + u32 gain_ctrl = 2; + u32 postdiv = 0; + u32 dco_12g = 0; + u32 pll_type = 0; + u32 d1 = 2, d3 = 5, d4 = 0, d5 = 0; + u32 d6 = 0, d6_new = 0; + u32 d7, d8 = 0; + u32 bonus_7_0 = 0; + u32 csel2fo = 11; + u32 csel2fo_ovrd_en = 1; + u64 temp0, temp1, temp2, temp3; + + p.surv_bonus.val = (bonus_7_0 << 16); + p.pll_reg4.val = (refclk_mhz_int << 17) + + (ndiv << 9) + (1 << 4); + p.bias_trim.val = (csel2fo_ovrd_en << 30) + (csel2fo << 24); + p.ssc_inj.val = 0; + found = calculate_target_dco_and_loop_cnt(frequency_khz, &target_dco_mhz, &loop_cnt); + if (!found) + return -EINVAL; + + m2div = div64_u64(target_dco_mhz, (refclk_khz * ndiv * m1div)); + m2div = mul_q32_u32(m2div, 1000); + if (Q32_TO_INT(m2div) > 511) + return -EINVAL; + + m2div_int = (u32)Q32_TO_INT(m2div); + m2div_frac = (u32)(Q32_TO_FRAC(m2div)); + frac_en = (m2div_frac > 0) ? 1 : 0; + + if (frac_en > 0) + tdc_res = 70; + else + tdc_res = 36; + tdc_fine = tdc_res > 50 ? 1 : 0; + temp0 = tdc_res * 40 * 11; + temp1 = div64_u64(((4 * TDC_RES_MULTIPLIER) + temp0) * 500, temp0 * refclk_khz); + temp2 = div64_u64(temp0 * refclk_khz, 1000); + temp3 = div64_u64(((8 * TDC_RES_MULTIPLIER) + temp2), temp2); + tdc_targetcnt = tdc_res < 50 ? (int)(temp1) : (int)(temp3); + tdc_targetcnt = (int)(tdc_targetcnt / 2); + temp0 = mul_q32_u32(target_dco_mhz, tdc_res); + temp0 >>= 32; + feedfwd_gain = (m2div_frac > 0) ? div64_u64(m1div * TDC_RES_MULTIPLIER, temp0) : 0; + feedfwd_cal_en = frac_en; + + temp0 = (u32)Q32_TO_INT(target_dco_mhz); + prop_coeff = (temp0 >= dco_fmin) ? 3 : 4; + int_coeff = (temp0 >= dco_fmin) ? 7 : 8; + ana_cfg = (temp0 >= dco_fmin) ? 8 : 6; + dco_12g = (temp0 >= dco_fmin) ? 0 : 1; + + if (temp0 > 12960) + d7 = 10; + else + d7 = 8; + + d8 = loop_cnt / 2; + d4 = d8 * 2; + + /* Compute pll_reg3,5,57 & lf */ + p.pll_reg3.val = (u32)((d4 << 21) + (d3 << 18) + (d1 << 15) + (m2div_int << 5)); + p.pll_reg5.val = m2div_frac; + postdiv = (d5 == 0) ? 9 : d5; + d6_new = (d6 == 0) ? 
40 : d6; + p.pll_reg57.val = (d7 << 24) + (postdiv << 15) + (d8 << 7) + d6_new; + p.lf.val = (u32)((frac_en << 31) + (1 << 30) + (frac_en << 29) + + (feedfwd_cal_en << 28) + (tdc_fine << 27) + + (gain_ctrl << 24) + (feedfwd_gain << 16) + + (int_coeff << 12) + (prop_coeff << 8) + tdc_targetcnt); + + compute_ssc(&p, ana_cfg); + compute_bias2(&p); + compute_tdc(&p, tdc_fine); + compute_dco_med(&p); + compute_dco_fine(&p, dco_12g); + + pll_type = ((frequency_khz == 10000) || (frequency_khz == 20000) || + (frequency_khz == 2500) || (dco_12g == 1)) ? 0 : 1; + set_phy_vdr_addresses(&p, pll_type); + + lt_state->config[0] = 0x84; + lt_state->config[1] = 0x2d; + ADDR_ASSIGN(0, p.pll_reg4); + ADDR_ASSIGN(1, p.pll_reg3); + ADDR_ASSIGN(2, p.pll_reg5); + ADDR_ASSIGN(3, p.pll_reg57); + ADDR_ASSIGN(4, p.lf); + ADDR_ASSIGN(5, p.tdc); + ADDR_ASSIGN(6, p.ssc); + ADDR_ASSIGN(7, p.bias2); + ADDR_ASSIGN(8, p.bias_trim); + ADDR_ASSIGN(9, p.dco_med); + ADDR_ASSIGN(10, p.dco_fine); + ADDR_ASSIGN(11, p.ssc_inj); + ADDR_ASSIGN(12, p.surv_bonus); + DATA_ASSIGN(0, p.pll_reg4); + DATA_ASSIGN(1, p.pll_reg3); + DATA_ASSIGN(2, p.pll_reg5); + DATA_ASSIGN(3, p.pll_reg57); + DATA_ASSIGN(4, p.lf); + DATA_ASSIGN(5, p.tdc); + DATA_ASSIGN(6, p.ssc); + DATA_ASSIGN(7, p.bias2); + DATA_ASSIGN(8, p.bias_trim); + DATA_ASSIGN(9, p.dco_med); + DATA_ASSIGN(10, p.dco_fine); + DATA_ASSIGN(11, p.ssc_inj); + DATA_ASSIGN(12, p.surv_bonus); + + return 0; +} + +static int +intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state) +{ +#define REGVAL(i) ( \ + (lt_state->data[i][3]) | \ + (lt_state->data[i][2] << 8) | \ + (lt_state->data[i][1] << 16) | \ + (lt_state->data[i][0] << 24) \ +) + + struct intel_display *display = to_intel_display(crtc_state); + const struct intel_lt_phy_pll_state *lt_state = + &crtc_state->dpll_hw_state.ltpll; + int clk = 0; + u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int; + u64 temp0, temp1; + /* + * The algorithm uses '+' to combine bitfields when + * constructing PLL_reg3 and PLL_reg57: + * PLL_reg57 = (D7 << 24) + (postdiv << 15) + (D8 << 7) + D6_new; + * PLL_reg3 = (D4 << 21) + (D3 << 18) + (D1 << 15) + (m2div_int << 5); + * + * However, this is likely intended to be a bitwise OR operation, + * as each field occupies distinct, non-overlapping bits in the register. 
+ * PLL_reg57 is composed of the following fields packed into a 32-bit value:
+ * - D7: max value 10 -> fits in 4 bits -> placed at bits 24-27
+ * - postdiv: max value 9 -> fits in 4 bits -> placed at bits 15-18
+ * - D8: derived from loop_cnt / 2, max 127 -> fits in 7 bits
+ * (though 8 bits are given to it) -> placed at bits 7-14
+ * - D6_new: fits in lower 7 bits -> placed at bits 0-6
+ * PLL_reg57 = (D7 << 24) | (postdiv << 15) | (D8 << 7) | D6_new;
+ *
+ * Similarly, PLL_reg3 is packed as:
+ * - D4: max value 256 -> fits in 9 bits -> placed at bits 21-29
+ * - D3: max value 9 -> fits in 4 bits -> placed at bits 18-21
+ * - D1: max value 2 -> fits in 2 bits -> placed at bits 15-16
+ * - m2div_int: max value 511 -> fits in 9 bits (10 bits allocated)
+ * -> placed at bits 5-14
+ * PLL_reg3 = (D4 << 21) | (D3 << 18) | (D1 << 15) | (m2div_int << 5);
+ */
+ pll_reg_5 = REGVAL(2);
+ pll_reg_3 = REGVAL(1);
+ pll_reg_57 = REGVAL(3);
+ m2div_frac = pll_reg_5;
+
+ /*
+ * From the forward algorithm we know
+ * m2div = 2 * m2
+ * val = y * frequency * 5
+ * So now,
+ * frequency = (m2 * 2 * refclk_khz / (d8 * 10))
+ * frequency = (m2div * refclk_khz / (d8 * 10))
+ */
+ d8 = (pll_reg_57 & REG_GENMASK(14, 7)) >> 7;
+ if (d8 == 0) {
+ drm_warn(display->drm,
+ "Invalid port clock, using lowest HDMI port clock\n");
+ return xe3plpd_lt_hdmi_252.clock;
+ }
+ m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5;
+ temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
+ temp1 = (u64)m2div_int * REF_CLK_KHZ;
+
+ clk = div_u64((temp1 + temp0), d8 * 10);
+
+ return clk;
+}
+
+int
+intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ int clk;
+ const struct intel_lt_phy_pll_state *lt_state =
+ &crtc_state->dpll_hw_state.ltpll;
+ u8 mode, rate;
+
+ mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK,
+ lt_state->config[0]);
+ /*
+ * For eDP/DP, read the clock value from the tables
+ * and return it, as the algorithm used for
+ * calculating the port clock does not exactly match
+ * the eDP/DP clock.
+ */ + if (mode == MODE_DP) { + rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, + lt_state->config[0]); + clk = intel_lt_phy_get_dp_clock(rate); + } else { + clk = intel_lt_phy_calc_hdmi_port_clock(crtc_state); + } + + return clk; +} + +int +intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + const struct intel_lt_phy_pll_state * const *tables; + int i; + + tables = intel_lt_phy_pll_tables_get(crtc_state, encoder); + if (!tables) + return -EINVAL; + + for (i = 0; tables[i]; i++) { + if (crtc_state->port_clock == tables[i]->clock) { + crtc_state->dpll_hw_state.ltpll = *tables[i]; + if (intel_crtc_has_dp_encoder(crtc_state)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + crtc_state->dpll_hw_state.ltpll.config[2] = 1; + } + crtc_state->dpll_hw_state.ltpll.ssc_enabled = + intel_lt_phy_pll_is_ssc_enabled(crtc_state, encoder); + return 0; + } + } + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + return intel_lt_phy_calculate_hdmi_state(&crtc_state->dpll_hw_state.ltpll, + crtc_state->port_clock); + } + + return -EINVAL; +} + +static void +intel_lt_phy_program_pll(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); + int i, j, k; + + intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_0_CONFIG, + crtc_state->dpll_hw_state.ltpll.config[0], MB_WRITE_COMMITTED); + intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG, + crtc_state->dpll_hw_state.ltpll.config[1], MB_WRITE_COMMITTED); + intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_2_CONFIG, + crtc_state->dpll_hw_state.ltpll.config[2], MB_WRITE_COMMITTED); + + for (i = 0; i <= 12; i++) { + intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_MSB(i), + crtc_state->dpll_hw_state.ltpll.addr_msb[i], + MB_WRITE_COMMITTED); + intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_LSB(i), + crtc_state->dpll_hw_state.ltpll.addr_lsb[i], + MB_WRITE_COMMITTED); + + for (j = 3, k = 0; j >= 0; j--, k++) + intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, + LT_PHY_VDR_X_DATAY(i, j), + crtc_state->dpll_hw_state.ltpll.data[i][k], + MB_WRITE_COMMITTED); + } +} + +static void +intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + bool lane_reversal = dig_port->lane_reversal; + u8 lane_count = crtc_state->lane_count; + bool is_dp_alt = + intel_tc_port_in_dp_alt_mode(dig_port); + enum intel_tc_pin_assignment tc_pin = + intel_tc_port_get_pin_assignment(dig_port); + u8 transmitter_mask = 0; + + /* + * We have a two transmitters per lane and total of 2 PHY lanes so a total + * of 4 transmitters. We prepare a mask of the lanes that need to be activated + * and the transmitter which need to be activated for each lane. TX 0,1 correspond + * to LANE0 and TX 2, 3 correspond to LANE1. + */ + + switch (lane_count) { + case 1: + transmitter_mask = lane_reversal ? REG_BIT8(3) : REG_BIT8(0); + if (is_dp_alt) { + if (tc_pin == INTEL_TC_PIN_ASSIGNMENT_D) + transmitter_mask = REG_BIT8(0); + else + transmitter_mask = REG_BIT8(1); + } + break; + case 2: + transmitter_mask = lane_reversal ? REG_GENMASK8(3, 2) : REG_GENMASK8(1, 0); + if (is_dp_alt) + transmitter_mask = REG_GENMASK8(1, 0); + break; + case 3: + transmitter_mask = lane_reversal ? 
REG_GENMASK8(3, 1) : REG_GENMASK8(2, 0); + if (is_dp_alt) + transmitter_mask = REG_GENMASK8(2, 0); + break; + case 4: + transmitter_mask = REG_GENMASK8(3, 0); + break; + default: + MISSING_CASE(lane_count); + transmitter_mask = REG_GENMASK8(3, 0); + break; + } + + if (transmitter_mask & BIT(0)) { + intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0), + LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0), + LT_PHY_TX_LANE_ENABLE); + } else { + intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0), + 0, LT_PHY_TXY_CTL10_MAC(0), 0); + } + + if (transmitter_mask & BIT(1)) { + intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1), + LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1), + LT_PHY_TX_LANE_ENABLE); + } else { + intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1), + 0, LT_PHY_TXY_CTL10_MAC(1), 0); + } + + if (transmitter_mask & BIT(2)) { + intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0), + LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0), + LT_PHY_TX_LANE_ENABLE); + } else { + intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0), + 0, LT_PHY_TXY_CTL10_MAC(0), 0); + } + + if (transmitter_mask & BIT(3)) { + intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1), + LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1), + LT_PHY_TX_LANE_ENABLE); + } else { + intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1), + 0, LT_PHY_TXY_CTL10_MAC(1), 0); + } +} + +void intel_lt_phy_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + bool lane_reversal = dig_port->lane_reversal; + u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); + enum phy phy = intel_encoder_to_phy(encoder); + enum port port = encoder->port; + intel_wakeref_t wakeref = 0; + u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES + ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) | + XE3PLPDP_LANE_PHY_PULSE_STATUS(1)) + : XE3PLPDP_LANE_PHY_PULSE_STATUS(0); + u8 rate_update; + + wakeref = intel_lt_phy_transaction_begin(encoder); + + /* 1. Enable MacCLK at default 162 MHz frequency. */ + intel_lt_phy_lane_reset(encoder, crtc_state->lane_count); + + /* 2. Program PORT_CLOCK_CTL register to configure clock muxes, gating, and SSC. */ + intel_lt_phy_program_port_clock_ctl(encoder, crtc_state, lane_reversal); + + /* 3. Change owned PHY lanes power to Ready state. */ + intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask, + XELPDP_P2_STATE_READY); + + /* + * 4. Read the PHY message bus VDR register PHY_VDR_0_Config check enabled PLL type, + * encoded rate and encoded mode. + */ + if (intel_lt_phy_config_changed(encoder, crtc_state)) { + /* + * 5. Program the PHY internal PLL registers over PHY message bus for the desired + * frequency and protocol type + */ + intel_lt_phy_program_pll(encoder, crtc_state); + + /* 6. Use the P2P transaction flow */ + /* + * 6.1. Set the PHY VDR register 0xCC4[Rate Control VDR Update] = 1 over PHY message + * bus for Owned PHY Lanes. + */ + /* + * 6.2. Poll for P2P Transaction Ready = "1" and read the MAC message bus VDR + * register at offset 0xC00 for Owned PHY Lanes*. + */ + /* 6.3. Clear P2P transaction Ready bit. */ + intel_lt_phy_p2p_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE, + LT_PHY_RATE_CONTROL_VDR_UPDATE, LT_PHY_MAC_VDR, + LT_PHY_PCLKIN_GATE); + + /* 7. 
Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */ + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_LANE_PCLK_PLL_REQUEST(0), 0); + + /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */ + if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_LANE_PCLK_PLL_ACK(0), + XE3PLPD_MACCLK_TURNOFF_LATENCY_US)) + drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n", + phy_name(phy)); + + /* + * 9. Follow the Display Voltage Frequency Switching - Sequence Before Frequency + * Change. We handle this step in bxt_set_cdclk(). + */ + /* 10. Program DDI_CLK_VALFREQ to match intended DDI clock frequency. */ + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), + crtc_state->port_clock); + + /* 11. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 1. */ + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_LANE_PCLK_PLL_REQUEST(0), + XELPDP_LANE_PCLK_PLL_REQUEST(0)); + + /* 12. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 1. */ + if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_LANE_PCLK_PLL_ACK(0), + XE3PLPD_MACCLK_TURNON_LATENCY_MS)) + drm_warn(display->drm, "PHY %c PLL MacCLK ack assertion timeout\n", + phy_name(phy)); + + /* + * 13. Ungate the forward clock by setting + * PORT_CLOCK_CTL[Forward Clock Ungate] = 1. + */ + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_FORWARD_CLOCK_UNGATE, + XELPDP_FORWARD_CLOCK_UNGATE); + + /* 14. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */ + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_pulse_status, + lane_phy_pulse_status); + /* + * 15. Clear the PHY VDR register 0xCC4[Rate Control VDR Update] over + * PHY message bus for Owned PHY Lanes. + */ + rate_update = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_RATE_UPDATE); + rate_update &= ~LT_PHY_RATE_CONTROL_VDR_UPDATE; + intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE, + rate_update, MB_WRITE_COMMITTED); + + /* 16. Poll for PORT_BUF_CTL2 register PHY Pulse Status = 1 for Owned PHY Lanes. */ + if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_pulse_status, + XE3PLPD_RATE_CALIB_DONE_LATENCY_MS)) + drm_warn(display->drm, "PHY %c PLL rate not changed\n", + phy_name(phy)); + + /* 17. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */ + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_pulse_status, + lane_phy_pulse_status); + } else { + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); + } + + /* + * 18. Follow the Display Voltage Frequency Switching - Sequence After Frequency Change. + * We handle this step in bxt_set_cdclk() + */ + /* 19. Move the PHY powerdown state to Active and program to enable/disable transmitters */ + intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask, + XELPDP_P0_STATE_ACTIVE); + + intel_lt_phy_enable_disable_tx(encoder, crtc_state); + intel_lt_phy_transaction_end(encoder, wakeref); +} + +void intel_lt_phy_pll_disable(struct intel_encoder *encoder) +{ + struct intel_display *display = to_intel_display(encoder); + enum phy phy = intel_encoder_to_phy(encoder); + enum port port = encoder->port; + intel_wakeref_t wakeref; + u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); + u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES + ? 
(XELPDP_LANE_PIPE_RESET(0) | + XELPDP_LANE_PIPE_RESET(1)) + : XELPDP_LANE_PIPE_RESET(0); + u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES + ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) | + XELPDP_LANE_PHY_CURRENT_STATUS(1)) + : XELPDP_LANE_PHY_CURRENT_STATUS(0); + u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES + ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) | + XE3PLPDP_LANE_PHY_PULSE_STATUS(1)) + : XE3PLPDP_LANE_PHY_PULSE_STATUS(0); + + wakeref = intel_lt_phy_transaction_begin(encoder); + + /* 1. Clear PORT_BUF_CTL2 [PHY Pulse Status]. */ + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_pulse_status, + lane_phy_pulse_status); + + /* 2. Set PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> Pipe Reset to 1. */ + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, + lane_pipe_reset); + + /* 3. Poll for PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> PHY Current Status == 1. */ + if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_current_status, + XE3PLPD_RESET_START_LATENCY_US)) + drm_warn(display->drm, "PHY %c failed to reset lane\n", + phy_name(phy)); + + /* 4. Clear for PHY pulse status on owned PHY lanes. */ + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), + lane_phy_pulse_status, + lane_phy_pulse_status); + + /* + * 5. Follow the Display Voltage Frequency Switching - + * Sequence Before Frequency Change. We handle this step in bxt_set_cdclk(). + */ + /* 6. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */ + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_LANE_PCLK_PLL_REQUEST(0), 0); + + /* 7. Program DDI_CLK_VALFREQ to 0. */ + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); + + /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */ + if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_LANE_PCLK_PLL_ACK(0), + XE3PLPD_MACCLK_TURNOFF_LATENCY_US)) + drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n", + phy_name(phy)); + + /* + * 9. Follow the Display Voltage Frequency Switching - + * Sequence After Frequency Change. We handle this step in bxt_set_cdclk(). + */ + /* 10. Program PORT_CLOCK_CTL register to disable and gate clocks. */ + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), + XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_FORWARD_CLOCK_UNGATE, 0); + + /* 11. Program PORT_BUF_CTL5[MacCLK Reset_0] = 1 to assert MacCLK reset. */ + intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port), + XE3PLPD_MACCLK_RESET_0, XE3PLPD_MACCLK_RESET_0); + + intel_lt_phy_transaction_end(encoder, wakeref); +} + +void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(encoder); + const struct intel_ddi_buf_trans *trans; + u8 owned_lane_mask; + intel_wakeref_t wakeref; + int n_entries, ln; + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + return; + + owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); + + wakeref = intel_lt_phy_transaction_begin(encoder); + + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(display->drm, !trans)) { + intel_lt_phy_transaction_end(encoder, wakeref); + return; + } + + for (ln = 0; ln < crtc_state->lane_count; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + int lane = ln / 2; + int tx = ln % 2; + u8 lane_mask = lane == 0 ? 
INTEL_LT_PHY_LANE0 : INTEL_LT_PHY_LANE1; + + if (!(lane_mask & owned_lane_mask)) + continue; + + intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL8(tx), + LT_PHY_TX_SWING_LEVEL_MASK | LT_PHY_TX_SWING_MASK, + LT_PHY_TX_SWING_LEVEL(trans->entries[level].lt.txswing_level) | + LT_PHY_TX_SWING(trans->entries[level].lt.txswing), + MB_WRITE_COMMITTED); + + intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL2(tx), + LT_PHY_TX_CURSOR_MASK, + LT_PHY_TX_CURSOR(trans->entries[level].lt.pre_cursor), + MB_WRITE_COMMITTED); + intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL3(tx), + LT_PHY_TX_CURSOR_MASK, + LT_PHY_TX_CURSOR(trans->entries[level].lt.main_cursor), + MB_WRITE_COMMITTED); + intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL4(tx), + LT_PHY_TX_CURSOR_MASK, + LT_PHY_TX_CURSOR(trans->entries[level].lt.post_cursor), + MB_WRITE_COMMITTED); + } + + intel_lt_phy_transaction_end(encoder, wakeref); +} + +void intel_lt_phy_dump_hw_state(struct intel_display *display, + const struct intel_lt_phy_pll_state *hw_state) +{ + int i, j; + + drm_dbg_kms(display->drm, "lt_phy_pll_hw_state:\n"); + for (i = 0; i < 3; i++) { + drm_dbg_kms(display->drm, "config[%d] = 0x%.4x,\n", + i, hw_state->config[i]); + } + + for (i = 0; i <= 12; i++) + for (j = 3; j >= 0; j--) + drm_dbg_kms(display->drm, "vdr_data[%d][%d] = 0x%.4x,\n", + i, j, hw_state->data[i][j]); +} + +bool +intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a, + const struct intel_lt_phy_pll_state *b) +{ + if (memcmp(&a->config, &b->config, sizeof(a->config)) != 0) + return false; + + if (memcmp(&a->data, &b->data, sizeof(a->data)) != 0) + return false; + + return true; +} + +void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + struct intel_lt_phy_pll_state *pll_state) +{ + u8 owned_lane_mask; + u8 lane; + intel_wakeref_t wakeref; + int i, j, k; + + pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)); + if (pll_state->tbt_mode) + return; + + owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); + lane = owned_lane_mask & INTEL_LT_PHY_LANE0 ? 
: INTEL_LT_PHY_LANE1; + wakeref = intel_lt_phy_transaction_begin(encoder); + + pll_state->config[0] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_0_CONFIG); + pll_state->config[1] = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG); + pll_state->config[2] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_2_CONFIG); + + for (i = 0; i <= 12; i++) { + for (j = 3, k = 0; j >= 0; j--, k++) + pll_state->data[i][k] = + intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, + LT_PHY_VDR_X_DATAY(i, j)); + } + + pll_state->clock = + intel_lt_phy_calc_port_clock(encoder, crtc_state); + intel_lt_phy_transaction_end(encoder, wakeref); +} + +void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(state); + struct intel_digital_port *dig_port; + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_encoder *encoder; + struct intel_lt_phy_pll_state pll_hw_state = {}; + const struct intel_lt_phy_pll_state *pll_sw_state = &new_crtc_state->dpll_hw_state.ltpll; + int clock; + int i, j; + + if (DISPLAY_VER(display) < 35) + return; + + if (!new_crtc_state->hw.active) + return; + + /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */ + if (!intel_crtc_needs_modeset(new_crtc_state) && + !intel_crtc_needs_fastset(new_crtc_state)) + return; + + encoder = intel_get_crtc_new_encoder(state, new_crtc_state); + intel_lt_phy_pll_readout_hw_state(encoder, new_crtc_state, &pll_hw_state); + clock = intel_lt_phy_calc_port_clock(encoder, new_crtc_state); + + dig_port = enc_to_dig_port(encoder); + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + return; + + INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.clock != clock, + "[CRTC:%d:%s] mismatch in LT PHY: Register CLOCK (expected %d, found %d)", + crtc->base.base.id, crtc->base.name, + pll_sw_state->clock, pll_hw_state.clock); + + for (i = 0; i < 3; i++) { + INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[i] != pll_sw_state->config[i], + "[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG%d: (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + pll_sw_state->config[i], pll_hw_state.config[i]); + } + + for (i = 0; i <= 12; i++) { + for (j = 3; j >= 0; j--) + INTEL_DISPLAY_STATE_WARN(display, + pll_hw_state.data[i][j] != + pll_sw_state->data[i][j], + "[CRTC:%d:%s] mismatch in LT PHY PLL DATA[%d][%d]: (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, j, + pll_sw_state->data[i][j], pll_hw_state.data[i][j]); + } +} + +void intel_xe3plpd_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + intel_mtl_tbt_pll_enable(encoder, crtc_state); + else + intel_lt_phy_pll_enable(encoder, crtc_state); +} + +void intel_xe3plpd_pll_disable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + intel_mtl_tbt_pll_disable(encoder); + else + intel_lt_phy_pll_disable(encoder); + +} diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.h b/drivers/gpu/drm/i915/display/intel_lt_phy.h new file mode 100644 index 000000000000..b7911acd7dcd --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_lt_phy.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright © 2025 Intel Corporation + */ + +#ifndef __INTEL_LT_PHY_H__ +#define 
__INTEL_LT_PHY_H__ + +#include <linux/types.h> + +struct intel_atomic_state; +struct intel_display; +struct intel_encoder; +struct intel_crtc_state; +struct intel_crtc; +struct intel_lt_phy_pll_state; + +void intel_lt_phy_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_lt_phy_pll_disable(struct intel_encoder *encoder); +int +intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder); +int intel_lt_phy_calc_port_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_lt_phy_dump_hw_state(struct intel_display *display, + const struct intel_lt_phy_pll_state *hw_state); +bool +intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a, + const struct intel_lt_phy_pll_state *b); +void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + struct intel_lt_phy_pll_state *pll_state); +void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int +intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state, + u32 frequency_khz); +void intel_xe3plpd_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_xe3plpd_pll_disable(struct intel_encoder *encoder); + +#define HAS_LT_PHY(display) (DISPLAY_VER(display) >= 35) + +#endif /* __INTEL_LT_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h new file mode 100644 index 000000000000..98ccc069a69b --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright © 2025 Intel Corporation + */ + +#ifndef __INTEL_LT_PHY_REGS_H__ +#define __INTEL_LT_PHY_REGS_H__ + +#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500 +#define XE3PLPD_MACCLK_TURNON_LATENCY_MS 2 +#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 1 +#define XE3PLPD_RATE_CALIB_DONE_LATENCY_MS 1 +#define XE3PLPD_RESET_START_LATENCY_US 10 +#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 4 +#define XE3PLPD_RESET_END_LATENCY_MS 2 + +/* LT Phy MAC Register */ +#define LT_PHY_MAC_VDR _MMIO(0xC00) +#define LT_PHY_PCLKIN_GATE REG_BIT8(0) + +/* LT Phy Pipe Spec Registers */ +#define LT_PHY_TXY_CTL8(idx) (0x408 + (0x200 * (idx))) +#define LT_PHY_TX_SWING_LEVEL_MASK REG_GENMASK8(7, 4) +#define LT_PHY_TX_SWING_LEVEL(val) REG_FIELD_PREP8(LT_PHY_TX_SWING_LEVEL_MASK, val) +#define LT_PHY_TX_SWING_MASK REG_BIT8(3) +#define LT_PHY_TX_SWING(val) REG_FIELD_PREP8(LT_PHY_TX_SWING_MASK, val) + +#define LT_PHY_TXY_CTL2(idx) (0x402 + (0x200 * (idx))) +#define LT_PHY_TXY_CTL3(idx) (0x403 + (0x200 * (idx))) +#define LT_PHY_TXY_CTL4(idx) (0x404 + (0x200 * (idx))) +#define LT_PHY_TX_CURSOR_MASK REG_GENMASK8(5, 0) +#define LT_PHY_TX_CURSOR(val) REG_FIELD_PREP8(LT_PHY_TX_CURSOR_MASK, val) + +#define LT_PHY_TXY_CTL10(idx) (0x40A + (0x200 * (idx))) +#define LT_PHY_TXY_CTL10_MAC(idx) _MMIO(LT_PHY_TXY_CTL10(idx)) +#define LT_PHY_TX_LANE_ENABLE REG_BIT8(0) + +/* LT Phy Vendor Register */ +#define LT_PHY_VDR_0_CONFIG 0xC02 +#define LT_PHY_VDR_DP_PLL_ENABLE REG_BIT(7) +#define LT_PHY_VDR_1_CONFIG 0xC03 +#define LT_PHY_VDR_RATE_ENCODING_MASK REG_GENMASK8(6, 3) +#define LT_PHY_VDR_MODE_ENCODING_MASK REG_GENMASK8(2, 0) +#define LT_PHY_VDR_2_CONFIG 0xCC3 + +#define LT_PHY_VDR_X_ADDR_MSB(idx) (0xC04 + 0x6 * (idx)) 
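The LT_PHY_VDR_X_ADDR_MSB/LSB and LT_PHY_VDR_X_DATAY macros in this header encode a simple layout: each of the 13 PLL VDR entries occupies a 6-byte window starting at 0xC04, holding the internal PLL register address (MSB, then LSB) followed by four data bytes written from DATA3 down to DATA0, which is the order intel_lt_phy_program_pll() iterates them. A small illustrative userspace sketch of that address arithmetic (separate from the driver code, shown only to make the window layout explicit; the 0xC04 base and 0x6 stride are taken from the macros here):

#include <stdio.h>

int main(void)
{
	for (int i = 0; i <= 12; i++) {
		unsigned int msb = 0xC04 + 0x6 * i;          /* LT_PHY_VDR_X_ADDR_MSB(i) */
		unsigned int lsb = 0xC05 + 0x6 * i;          /* LT_PHY_VDR_X_ADDR_LSB(i) */
		unsigned int d3 = 0xC06 + (3 - 3) + 0x6 * i; /* LT_PHY_VDR_X_DATAY(i, 3) */
		unsigned int d0 = 0xC06 + (3 - 0) + 0x6 * i; /* LT_PHY_VDR_X_DATAY(i, 0) */

		printf("idx %2d: addr msb 0x%03X lsb 0x%03X data 0x%03X..0x%03X\n",
		       i, msb, lsb, d3, d0);
	}

	return 0;
}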
+#define LT_PHY_VDR_X_ADDR_LSB(idx) (0xC05 + 0x6 * (idx)) + +#define LT_PHY_VDR_X_DATAY(idx, y) ((0xC06 + (3 - (y))) + 0x6 * (idx)) + +#define LT_PHY_RATE_UPDATE 0xCC4 +#define LT_PHY_RATE_CONTROL_VDR_UPDATE REG_BIT8(0) + +#define _XE3PLPD_PORT_BUF_CTL5(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \ + _XELPDP_PORT_BUF_CTL1_LN0_A, \ + _XELPDP_PORT_BUF_CTL1_LN0_B, \ + _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \ + _XELPDP_PORT_BUF_CTL1_LN0_USBC2) \ + + 0x34) +#define XE3PLPD_PORT_BUF_CTL5(port) _XE3PLPD_PORT_BUF_CTL5(__xe2lpd_port_idx(port)) +#define XE3PLPD_MACCLK_RESET_0 REG_BIT(11) +#define XE3PLPD_MACCLK_RATE_MASK REG_GENMASK(4, 0) +#define XE3PLPD_MACCLK_RATE_DEF REG_FIELD_PREP(XE3PLPD_MACCLK_RATE_MASK, 0x1F) + +#define _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) \ + + 0x60 + (lane) * 0x4) +#define XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(port, lane) _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(__xe2lpd_port_idx(port), \ + lane) +#define XE3LPD_PORT_P2M_ADDR_MASK REG_GENMASK(11, 0) + +#define PLL_REG4_ADDR 0x8510 +#define PLL_REG3_ADDR 0x850C +#define PLL_REG5_ADDR 0x8514 +#define PLL_REG57_ADDR 0x85E4 +#define PLL_LF_ADDR 0x860C +#define PLL_TDC_ADDR 0x8610 +#define PLL_SSC_ADDR 0x8614 +#define PLL_BIAS2_ADDR 0x8618 +#define PLL_BIAS_TRIM_ADDR 0x8648 +#define PLL_DCO_MED_ADDR 0x8640 +#define PLL_DCO_FINE_ADDR 0x864C +#define PLL_SSC_INJ_ADDR 0x8624 +#define PLL_SURV_BONUS_ADDR 0x8644 +#define PLL_TYPE_OFFSET 0x200 +#define PLL_REG_ADDR(base, pll_type) ((pll_type) ? (base) + PLL_TYPE_OFFSET : (base)) +#endif /* __INTEL_LT_PHY_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 48f4d8ed4f15..89aeb4fb340e 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -329,7 +329,7 @@ static void intel_enable_lvds(struct intel_atomic_state *state, intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON); intel_de_posting_read(display, lvds_encoder->reg); - if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000)) + if (intel_de_wait_for_set_ms(display, PP_STATUS(display, 0), PP_ON, 5000)) drm_err(display->drm, "timed out waiting for panel to power on\n"); @@ -345,7 +345,7 @@ static void intel_disable_lvds(struct intel_atomic_state *state, struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0); - if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000)) + if (intel_de_wait_for_clear_ms(display, PP_STATUS(display, 0), PP_ON, 1000)) drm_err(display->drm, "timed out waiting for panel to power off\n"); @@ -384,7 +384,7 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000)) + if (intel_de_wait_for_clear_ms(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000)) drm_err(display->drm, "timed out waiting for panel power cycle delay\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 8415f3d703ed..0dcb0597879a 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -19,6 +19,7 @@ #include 
"intel_color.h" #include "intel_crtc.h" #include "intel_crtc_state_dump.h" +#include "intel_dbuf_bw.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display.h" @@ -176,6 +177,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc) intel_cdclk_crtc_disable_noatomic(crtc); skl_wm_crtc_disable_noatomic(crtc); intel_bw_crtc_disable_noatomic(crtc); + intel_dbuf_bw_crtc_disable_noatomic(crtc); intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0); } @@ -851,18 +853,23 @@ static void intel_modeset_readout_hw_state(struct intel_display *display) */ if (plane_state->uapi.visible && plane->min_cdclk) { if (crtc_state->double_wide || DISPLAY_VER(display) >= 10) - crtc_state->min_cdclk[plane->id] = + crtc_state->plane_min_cdclk[plane->id] = DIV_ROUND_UP(crtc_state->pixel_rate, 2); else - crtc_state->min_cdclk[plane->id] = + crtc_state->plane_min_cdclk[plane->id] = crtc_state->pixel_rate; } drm_dbg_kms(display->drm, "[PLANE:%d:%s] min_cdclk %d kHz\n", plane->base.base.id, plane->base.name, - crtc_state->min_cdclk[plane->id]); + crtc_state->plane_min_cdclk[plane->id]); } + crtc_state->min_cdclk = intel_crtc_min_cdclk(crtc_state); + + drm_dbg_kms(display->drm, "[CRTC:%d:%s] min_cdclk %d kHz\n", + crtc->base.base.id, crtc->base.name, crtc_state->min_cdclk); + intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, crtc_state->port_clock); } @@ -872,6 +879,7 @@ static void intel_modeset_readout_hw_state(struct intel_display *display) intel_wm_get_hw_state(display); intel_bw_update_hw_state(display); + intel_dbuf_bw_update_hw_state(display); intel_cdclk_update_hw_state(display); intel_pmdemand_init_pmdemand_params(display, pmdemand_state); diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c index f2f6b9d9afa1..b361a77cd235 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c @@ -16,6 +16,7 @@ #include "intel_display_core.h" #include "intel_display_types.h" #include "intel_fdi.h" +#include "intel_lt_phy.h" #include "intel_modeset_verify.h" #include "intel_snps_phy.h" #include "skl_watermark.h" @@ -246,6 +247,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state, intel_dpll_state_verify(state, crtc); intel_mpllb_state_verify(state, crtc); intel_cx0pll_state_verify(state, crtc); + intel_lt_phy_pll_state_verify(state, crtc); } void intel_modeset_verify_disabled(struct intel_atomic_state *state) diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 272f9e7af4d4..88eb7ae5765c 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -27,6 +27,7 @@ */ #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include "gem/i915_gem_internal.h" #include "gem/i915_gem_object_frontbuffer.h" @@ -307,8 +308,6 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay, intel_frontbuffer_put(overlay->frontbuffer); overlay->frontbuffer = frontbuffer; - intel_frontbuffer_flip_prepare(display, INTEL_FRONTBUFFER_OVERLAY(pipe)); - overlay->old_vma = overlay->vma; if (vma) overlay->vma = i915_vma_get(vma); @@ -365,7 +364,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay) if (drm_WARN_ON(display->drm, !vma)) return; - intel_frontbuffer_flip_complete(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); + intel_frontbuffer_flip(display, 
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); i915_vma_unpin(vma); i915_vma_put(vma); @@ -821,8 +820,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_pin_section; } - i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB); - if (!overlay->active) { const struct intel_crtc_state *crtc_state = overlay->crtc->config; diff --git a/drivers/gpu/drm/i915/display/intel_pch.c b/drivers/gpu/drm/i915/display/intel_pch.c index 469e8a3cfb49..65359a36df48 100644 --- a/drivers/gpu/drm/i915/display/intel_pch.c +++ b/drivers/gpu/drm/i915/display/intel_pch.c @@ -5,8 +5,8 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_display_core.h" +#include "intel_display_utils.h" #include "intel_pch.h" #define INTEL_PCH_DEVICE_ID_MASK 0xff80 @@ -328,7 +328,7 @@ void intel_pch_detect(struct intel_display *display) "Display disabled, reverting to NOP PCH\n"); display->pch_type = PCH_NOP; } else if (!pch) { - if (i915_run_as_guest() && HAS_DISPLAY(display)) { + if (intel_display_run_as_guest(display) && HAS_DISPLAY(display)) { intel_virt_detect_pch(display, &id, &pch_type); display->pch_type = pch_type; } else { diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 3456c794e0e7..16619f7be5f8 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -305,7 +305,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) } intel_de_write(display, reg, val | TRANS_ENABLE); - if (intel_de_wait_for_set(display, reg, TRANS_STATE_ENABLE, 100)) + if (intel_de_wait_for_set_ms(display, reg, TRANS_STATE_ENABLE, 100)) drm_err(display->drm, "failed to enable transcoder %c\n", pipe_name(pipe)); } @@ -326,7 +326,7 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) reg = PCH_TRANSCONF(pipe); intel_de_rmw(display, reg, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50)) + if (intel_de_wait_for_clear_ms(display, reg, TRANS_STATE_ENABLE, 50)) drm_err(display->drm, "failed to disable transcoder %c\n", pipe_name(pipe)); @@ -572,8 +572,8 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) val |= TRANS_INTERLACE_PROGRESSIVE; intel_de_write(display, LPT_TRANSCONF, val); - if (intel_de_wait_for_set(display, LPT_TRANSCONF, - TRANS_STATE_ENABLE, 100)) + if (intel_de_wait_for_set_ms(display, LPT_TRANSCONF, + TRANS_STATE_ENABLE, 100)) drm_err(display->drm, "Failed to enable PCH transcoder\n"); } @@ -581,8 +581,8 @@ static void lpt_disable_pch_transcoder(struct intel_display *display) { intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(display, LPT_TRANSCONF, - TRANS_STATE_ENABLE, 50)) + if (intel_de_wait_for_clear_ms(display, LPT_TRANSCONF, + TRANS_STATE_ENABLE, 50)) drm_err(display->drm, "Failed to disable PCH transcoder\n"); /* Workaround: clear timing override bit. 
*/ diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 9ae53679a041..9a89bb6dcf65 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -6,10 +6,10 @@ #include <drm/drm_print.h> #include "i915_reg.h" -#include "i915_utils.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_panel.h" #include "intel_pch_refclk.h" #include "intel_sbi.h" @@ -21,17 +21,15 @@ static void lpt_fdi_reset_mphy(struct intel_display *display) intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL); - ret = intel_de_wait_custom(display, SOUTH_CHICKEN2, - FDI_MPHY_IOSFSB_RESET_STATUS, FDI_MPHY_IOSFSB_RESET_STATUS, - 100, 0, NULL); + ret = intel_de_wait_for_set_us(display, SOUTH_CHICKEN2, + FDI_MPHY_IOSFSB_RESET_STATUS, 100); if (ret) drm_err(display->drm, "FDI mPHY reset assert timeout\n"); intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0); - ret = intel_de_wait_custom(display, SOUTH_CHICKEN2, - FDI_MPHY_IOSFSB_RESET_STATUS, 0, - 100, 0, NULL); + ret = intel_de_wait_for_clear_us(display, SOUTH_CHICKEN2, + FDI_MPHY_IOSFSB_RESET_STATUS, 100); if (ret) drm_err(display->drm, "FDI mPHY reset de-assert timeout\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c index 68539e7c2a24..6dda496190e0 100644 --- a/drivers/gpu/drm/i915/display/intel_pfit.c +++ b/drivers/gpu/drm/i915/display/intel_pfit.c @@ -5,12 +5,12 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_de.h" #include "intel_display_core.h" #include "intel_display_driver.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_lvds_regs.h" #include "intel_pfit.h" #include "intel_pfit_regs.h" diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index c2b4b2254190..1f27643412f1 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -28,6 +28,8 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_irq.h" #include "intel_atomic.h" diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c index 2329f09d413d..5105e3278bc4 100644 --- a/drivers/gpu/drm/i915/display/intel_plane.c +++ b/drivers/gpu/drm/i915/display/intel_plane.c @@ -43,9 +43,9 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_panic.h> +#include <drm/drm_print.h> #include "gem/i915_gem_object.h" -#include "i915_scheduler_types.h" #include "i9xx_plane_regs.h" #include "intel_cdclk.h" #include "intel_cursor.h" @@ -292,64 +292,21 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, rel_data_rate); } -int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, - struct intel_plane *plane, - bool *need_cdclk_calc) +static void intel_plane_calc_min_cdclk(struct intel_atomic_state *state, + struct intel_plane *plane) { - struct intel_display *display = to_intel_display(plane); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); - const struct intel_cdclk_state *cdclk_state; - const struct intel_crtc_state *old_crtc_state; struct intel_crtc_state 
*new_crtc_state; if (!plane_state->uapi.visible || !plane->min_cdclk) - return 0; + return; - old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - new_crtc_state->min_cdclk[plane->id] = + new_crtc_state->plane_min_cdclk[plane->id] = plane->min_cdclk(new_crtc_state, plane_state); - - /* - * No need to check against the cdclk state if - * the min cdclk for the plane doesn't increase. - * - * Ie. we only ever increase the cdclk due to plane - * requirements. This can reduce back and forth - * display blinking due to constant cdclk changes. - */ - if (new_crtc_state->min_cdclk[plane->id] <= - old_crtc_state->min_cdclk[plane->id]) - return 0; - - cdclk_state = intel_atomic_get_cdclk_state(state); - if (IS_ERR(cdclk_state)) - return PTR_ERR(cdclk_state); - - /* - * No need to recalculate the cdclk state if - * the min cdclk for the pipe doesn't increase. - * - * Ie. we only ever increase the cdclk due to plane - * requirements. This can reduce back and forth - * display blinking due to constant cdclk changes. - */ - if (new_crtc_state->min_cdclk[plane->id] <= - intel_cdclk_min_cdclk(cdclk_state, crtc->pipe)) - return 0; - - drm_dbg_kms(display->drm, - "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n", - plane->base.base.id, plane->base.name, - new_crtc_state->min_cdclk[plane->id], - crtc->base.base.id, crtc->base.name, - intel_cdclk_min_cdclk(cdclk_state, crtc->pipe)); - *need_cdclk_calc = true; - - return 0; } static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state) @@ -435,7 +392,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, crtc_state->data_rate_y[plane->id] = 0; crtc_state->rel_data_rate[plane->id] = 0; crtc_state->rel_data_rate_y[plane->id] = 0; - crtc_state->min_cdclk[plane->id] = 0; + crtc_state->plane_min_cdclk[plane->id] = 0; plane_state->uapi.visible = false; } @@ -1094,6 +1051,9 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) DISPLAY_VERx100(display) == 3002) && src_x % 2 != 0) hsub = 2; + + if (DISPLAY_VER(display) == 35) + vsub = 2; } else { hsub = fb->format->hsub; vsub = fb->format->vsub; @@ -1172,7 +1132,6 @@ static int intel_prepare_plane_fb(struct drm_plane *_plane, struct drm_plane_state *_new_plane_state) { - struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY }; struct intel_plane *plane = to_intel_plane(_plane); struct intel_display *display = to_intel_display(plane); struct intel_plane_state *new_plane_state = @@ -1221,8 +1180,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, goto unpin_fb; if (new_plane_state->uapi.fence) { - i915_gem_fence_wait_priority(new_plane_state->uapi.fence, - &attr); + i915_gem_fence_wait_priority_display(new_plane_state->uapi.fence); intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, new_plane_state->uapi.fence); @@ -1746,5 +1704,8 @@ int intel_plane_atomic_check(struct intel_atomic_state *state) return ret; } + for_each_new_intel_plane_in_state(state, plane, plane_state, i) + intel_plane_calc_min_cdclk(state, plane); + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h index 8af41ccc0a69..4e99df9de3e8 100644 --- a/drivers/gpu/drm/i915/display/intel_plane.h +++ b/drivers/gpu/drm/i915/display/intel_plane.h @@ -69,9 +69,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ struct intel_crtc_state *crtc_state, const struct intel_plane_state 
*old_plane_state, struct intel_plane_state *intel_state); -int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, - struct intel_plane *plane, - bool *need_cdclk_calc); int intel_plane_check_clipping(struct intel_plane_state *plane_state, struct intel_crtc_state *crtc_state, int min_scale, int max_scale, diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index a9f36b1b50c1..a1de1ec564d1 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -3,6 +3,8 @@ * Copyright © 2021 Intel Corporation */ +#include <drm/drm_print.h> + #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "i915_drv.h" @@ -131,6 +133,7 @@ initial_plane_vma(struct intel_display *display, struct drm_mm_node orig_mm = {}; struct i915_vma *vma; resource_size_t phys_base; + unsigned int tiling; u32 base, size; u64 pinctl; @@ -177,17 +180,19 @@ initial_plane_vma(struct intel_display *display, i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE); - switch (plane_config->tiling) { + tiling = intel_fb_modifier_to_tiling(plane_config->fb->base.modifier); + + switch (tiling) { case I915_TILING_NONE: break; case I915_TILING_X: case I915_TILING_Y: obj->tiling_and_stride = plane_config->fb->base.pitches[0] | - plane_config->tiling; + tiling; break; default: - MISSING_CASE(plane_config->tiling); + MISSING_CASE(tiling); goto err_obj; } @@ -372,7 +377,7 @@ valid_fb: plane_state->uapi.crtc_w = fb->width; plane_state->uapi.crtc_h = fb->height; - if (plane_config->tiling) + if (fb->modifier != DRM_FORMAT_MOD_LINEAR) dev_priv->preserve_bios_swizzle = true; plane_state->uapi.fb = fb; diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c index d806c15db7ce..dc44a7a169c1 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.c +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c @@ -7,13 +7,14 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_de.h" +#include "intel_display_jiffies.h" #include "intel_display_regs.h" #include "intel_display_trace.h" +#include "intel_display_utils.h" #include "intel_pmdemand.h" #include "intel_step.h" #include "skl_watermark.h" @@ -389,12 +390,12 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state) static bool intel_pmdemand_check_prev_transaction(struct intel_display *display) { - return !(intel_de_wait_for_clear(display, - XELPDP_INITIATE_PMDEMAND_REQUEST(1), - XELPDP_PMDEMAND_REQ_ENABLE, 10) || - intel_de_wait_for_clear(display, - GEN12_DCPR_STATUS_1, - XELPDP_PMDEMAND_INFLIGHT_STATUS, 10)); + return !(intel_de_wait_for_clear_ms(display, + XELPDP_INITIATE_PMDEMAND_REQUEST(1), + XELPDP_PMDEMAND_REQ_ENABLE, 10) || + intel_de_wait_for_clear_ms(display, + GEN12_DCPR_STATUS_1, + XELPDP_PMDEMAND_INFLIGHT_STATUS, 10)); } void @@ -461,9 +462,9 @@ static void intel_pmdemand_poll(struct intel_display *display) u32 status; int ret; - ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), - XELPDP_PMDEMAND_REQ_ENABLE, 0, - 50, timeout_ms, &status); + ret = intel_de_wait_ms(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), + XELPDP_PMDEMAND_REQ_ENABLE, 0, + timeout_ms, &status); if (ret == -ETIMEDOUT) drm_err(display->drm, diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 327e0de86f1e..25692a547764 
100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -10,11 +10,12 @@ #include "g4x_dp.h" #include "i915_reg.h" -#include "i915_utils.h" #include "intel_de.h" +#include "intel_display_jiffies.h" #include "intel_display_power_well.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 10eb93a34cf2..08bca4573974 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -26,6 +26,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "i915_reg.h" @@ -39,6 +40,7 @@ #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dmc.h" #include "intel_dp.h" #include "intel_dp_aux.h" @@ -50,6 +52,7 @@ #include "intel_snps_phy.h" #include "intel_step.h" #include "intel_vblank.h" +#include "intel_vdsc.h" #include "intel_vrr.h" #include "skl_universal_plane.h" @@ -580,11 +583,53 @@ exit: intel_dp->psr.su_y_granularity = y; } +static enum intel_panel_replay_dsc_support +compute_pr_dsc_support(struct intel_dp *intel_dp) +{ + u8 pr_dsc_mode; + u8 val; + + val = intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)]; + pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val); + + switch (pr_dsc_mode) { + case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY: + return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY; + case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED: + return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE; + default: + MISSING_CASE(pr_dsc_mode); + fallthrough; + case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED: + case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED: + return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED; + } +} + +static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support) +{ + switch (dsc_support) { + case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED: + return "not supported"; + case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY: + return "full frame only"; + case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE: + return "selective update"; + default: + MISSING_CASE(dsc_support); + return "n/a"; + }; +} + static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); int ret; + /* TODO: Enable Panel Replay on MST once it's properly implemented. */ + if (intel_dp->mst_detect == DRM_DP_MST) + return; + ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT, &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd)); if (ret < 0) @@ -615,10 +660,13 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) DP_PANEL_REPLAY_SU_SUPPORT) intel_dp->psr.sink_panel_replay_su_support = true; + intel_dp->psr.sink_panel_replay_dsc_support = compute_pr_dsc_support(intel_dp); + drm_dbg_kms(display->drm, - "Panel replay %sis supported by panel\n", + "Panel replay %sis supported by panel (in DSC mode: %s)\n", intel_dp->psr.sink_panel_replay_su_support ? 
- "selective_update " : ""); + "selective_update " : "", + panel_replay_dsc_support_str(intel_dp->psr.sink_panel_replay_dsc_support)); } static void _psr_init_dpcd(struct intel_dp *intel_dp) @@ -888,7 +936,8 @@ static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); u32 current_dc_state = intel_display_power_get_current_dc_state(display); - struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe]; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe); + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base); return (current_dc_state != DC_STATE_EN_UPTO_DC5 && current_dc_state != DC_STATE_EN_UPTO_DC6) || @@ -956,15 +1005,16 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) return val; } -static int psr2_block_count_lines(struct intel_dp *intel_dp) +static int +psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines) { - return intel_dp->alpm_parameters.io_wake_lines < 9 && - intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12; + return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12; } static int psr2_block_count(struct intel_dp *intel_dp) { - return psr2_block_count_lines(intel_dp) / 4; + return psr2_block_count_lines(intel_dp->psr.io_wake_lines, + intel_dp->psr.fast_wake_lines) / 4; } static u8 frames_before_su_entry(struct intel_dp *intel_dp) @@ -1059,20 +1109,20 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) */ int tmp; - tmp = map[intel_dp->alpm_parameters.io_wake_lines - + tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES); - tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; + tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES); } else if (DISPLAY_VER(display) >= 20) { - val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines); + val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); } else if (DISPLAY_VER(display) >= 12) { - val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines); - val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines); + val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); + val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } else if (DISPLAY_VER(display) >= 9) { - val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines); - val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines); + val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); + val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } if (intel_dp->psr.req_psr2_sdp_prior_scanline) @@ -1251,12 +1301,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, return false; } - if (crtc_state->uapi.async_flip) { - drm_dbg_kms(display->drm, - "PSR2 sel fetch not enabled, async flip enabled\n"); - return false; - } - return crtc_state->enable_psr2_sel_fetch = true; } @@ -1360,22 +1404,54 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp, return entry_setup_frames; } -static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - bool aux_less) +static +int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state, + bool needs_panel_replay, + bool needs_sel_update) { - struct intel_display 
*display = to_intel_display(intel_dp); - int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end - - crtc_state->hw.adjusted_mode.crtc_vblank_start; - int wake_lines; + struct intel_display *display = to_intel_display(crtc_state); - if (aux_less) - wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines; + if (!crtc_state->has_psr) + return 0; + + /* Wa_14015401596 */ + if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14)) + return 1; + + /* Rest is for SRD_STATUS needed on LunarLake and onwards */ + if (DISPLAY_VER(display) < 20) + return 0; + + /* + * Comment on SRD_STATUS register in Bspec for LunarLake and onwards: + * + * To deterministically capture the transition of the state machine + * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least + * one line after the non-delayed V. Blank. + * + * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0 + * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ] + * - TRANS_VTOTAL[ Vertical Active ]) + * + * SRD_STATUS is used only by PSR1 on PantherLake. + * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake. + */ + + if (DISPLAY_VER(display) >= 30 && (needs_panel_replay || + needs_sel_update)) + return 0; + else if (DISPLAY_VER(display) < 30 && (needs_sel_update || + intel_crtc_has_type(crtc_state, + INTEL_OUTPUT_EDP))) + return 0; else - wake_lines = DISPLAY_VER(display) < 20 ? - psr2_block_count_lines(intel_dp) : - intel_dp->alpm_parameters.io_wake_lines; + return 1; +} +static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state, + int vblank, + int wake_lines) +{ if (crtc_state->req_psr2_sdp_prior_scanline) vblank -= 1; @@ -1386,9 +1462,46 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp, return true; } +static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + bool aux_less, + bool needs_panel_replay, + bool needs_sel_update) +{ + struct intel_display *display = to_intel_display(intel_dp); + int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end - + crtc_state->hw.adjusted_mode.crtc_vblank_start; + int wake_lines; + int scl = _intel_psr_min_set_context_latency(crtc_state, + needs_panel_replay, + needs_sel_update); + vblank -= scl; + + if (aux_less) + wake_lines = crtc_state->alpm_state.aux_less_wake_lines; + else + wake_lines = DISPLAY_VER(display) < 20 ? + psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines, + crtc_state->alpm_state.fast_wake_lines) : + crtc_state->alpm_state.io_wake_lines; + + /* + * Guardband has not been computed yet, so we conservatively check if the + * full vblank duration is sufficient to accommodate wake line requirements + * for PSR features like Panel Replay and Selective Update. + * + * Once the actual guardband is available, a more accurate validation is + * performed in intel_psr_compute_config_late(), and PSR features are + * disabled if wake lines exceed the available guardband. 
+ */ + return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines); +} + static bool alpm_config_valid(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - bool aux_less) + struct intel_crtc_state *crtc_state, + bool aux_less, + bool needs_panel_replay, + bool needs_sel_update) { struct intel_display *display = to_intel_display(intel_dp); @@ -1398,7 +1511,8 @@ static bool alpm_config_valid(struct intel_dp *intel_dp, return false; } - if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) { + if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less, + needs_panel_replay, needs_sel_update)) { drm_dbg_kms(display->drm, "PSR2/Panel Replay not enabled, too short vblank time\n"); return false; @@ -1490,7 +1604,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - if (!alpm_config_valid(intel_dp, crtc_state, false)) + if (!alpm_config_valid(intel_dp, crtc_state, false, false, true)) return false; if (!crtc_state->enable_psr2_sel_fetch && @@ -1535,9 +1649,21 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp, goto unsupported; } - if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 || - !intel_dp->psr.sink_panel_replay_su_support)) - goto unsupported; + if (crtc_state->has_panel_replay) { + if (DISPLAY_VER(display) < 14) + goto unsupported; + + if (!intel_dp->psr.sink_panel_replay_su_support) + goto unsupported; + + if (intel_dsc_enabled_on_link(crtc_state) && + intel_dp->psr.sink_panel_replay_dsc_support != + INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) { + drm_dbg_kms(display->drm, + "Selective update with Panel Replay not enabled because it's not supported with DSC\n"); + goto unsupported; + } + } if (crtc_state->crc_enabled) { drm_dbg_kms(display->drm, @@ -1582,6 +1708,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp, if (entry_setup_frames >= 0) { intel_dp->psr.entry_setup_frames = entry_setup_frames; } else { + crtc_state->no_psr_reason = "PSR setup timing not met"; drm_dbg_kms(display->drm, "PSR condition failed: PSR setup timing not met\n"); return false; @@ -1592,7 +1719,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp, static bool _panel_replay_compute_config(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, + struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_display *display = to_intel_display(intel_dp); @@ -1614,6 +1741,14 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, return false; } + if (intel_dsc_enabled_on_link(crtc_state) && + intel_dp->psr.sink_panel_replay_dsc_support == + INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) { + drm_dbg_kms(display->drm, + "Panel Replay not enabled because it's not supported with DSC\n"); + return false; + } + if (!intel_dp_is_edp(intel_dp)) return true; @@ -1641,7 +1776,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, return false; } - if (!alpm_config_valid(intel_dp, crtc_state, true)) + if (!alpm_config_valid(intel_dp, crtc_state, true, true, false)) return false; return true; @@ -1656,15 +1791,40 @@ static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp, !crtc_state->has_sel_update); } +static +void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); + struct intel_crtc *crtc; + u8 active_pipes = 0; + + /* Wa_16025596647 */ + 
if (DISPLAY_VER(display) != 20 && + !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) + return; + + /* Not needed by Panel Replay */ + if (crtc_state->has_panel_replay) + return; + + /* We ignore possible secondary PSR/Panel Replay capable eDP */ + for_each_intel_crtc(display->drm, crtc) + active_pipes |= crtc->active ? BIT(crtc->pipe) : 0; + + active_pipes = intel_calc_active_pipes(state, active_pipes); + + crtc_state->active_non_psr_pipes = active_pipes & + ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe); +} + void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct intel_display *display = to_intel_display(intel_dp); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); - struct intel_crtc *crtc; - u8 active_pipes = 0; if (!psr_global_enabled(intel_dp)) { drm_dbg_kms(display->drm, "PSR disabled by flag\n"); @@ -1694,6 +1854,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, return; } + /* Only used for state verification. */ + crtc_state->panel_replay_dsc_support = intel_dp->psr.sink_panel_replay_dsc_support; crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp, crtc_state, conn_state); @@ -1705,31 +1867,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, return; crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state); - - /* Wa_18037818876 */ - if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) { - crtc_state->has_psr = false; - drm_dbg_kms(display->drm, - "PSR disabled to workaround PSR FSM hang issue\n"); - } - - /* Rest is for Wa_16025596647 */ - if (DISPLAY_VER(display) != 20 && - !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) - return; - - /* Not needed by Panel Replay */ - if (crtc_state->has_panel_replay) - return; - - /* We ignore possible secondary PSR/Panel Replay capable eDP */ - for_each_intel_crtc(display->drm, crtc) - active_pipes |= crtc->active ? 
BIT(crtc->pipe) : 0; - - active_pipes = intel_calc_active_pipes(state, active_pipes); - - crtc_state->active_non_psr_pipes = active_pipes & - ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe); } void intel_psr_get_config(struct intel_encoder *encoder, @@ -1813,6 +1950,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp) hsw_activate_psr1(intel_dp); intel_dp->psr.active = true; + intel_dp->psr.no_psr_reason = NULL; } /* @@ -2022,6 +2160,8 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, crtc_state->req_psr2_sdp_prior_scanline; intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes; intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used; + intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines; + intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines; if (!psr_interrupt_error_check(intel_dp)) return; @@ -2131,8 +2271,8 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp) } /* Wait till PSR is idle */ - if (intel_de_wait_for_clear(display, psr_status, - psr_status_mask, 2000)) + if (intel_de_wait_for_clear_ms(display, psr_status, + psr_status_mask, 2000)) drm_err(display->drm, "Timed out waiting PSR idle state\n"); } @@ -2360,50 +2500,17 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb, } /** - * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR + * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR * @crtc_state: the crtc state * - * Return minimum vblank delay needed by PSR. + * Return minimum SCL lines/delay needed by PSR. */ -int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state) +int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); - - if (!crtc_state->has_psr) - return 0; - - /* Wa_14015401596 */ - if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14)) - return 1; - /* Rest is for SRD_STATUS needed on LunarLake and onwards */ - if (DISPLAY_VER(display) < 20) - return 0; - - /* - * Comment on SRD_STATUS register in Bspec for LunarLake and onwards: - * - * To deterministically capture the transition of the state machine - * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least - * one line after the non-delayed V. Blank. - * - * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0 - * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ] - * - TRANS_VTOTAL[ Vertical Active ]) - * - * SRD_STATUS is used only by PSR1 on PantherLake. - * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake. 
- */ - - if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay || - crtc_state->has_sel_update)) - return 0; - else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update || - intel_crtc_has_type(crtc_state, - INTEL_OUTPUT_EDP))) - return 0; - else - return 1; + return _intel_psr_min_set_context_latency(crtc_state, + crtc_state->has_panel_replay, + crtc_state->has_sel_update); } static u32 man_trk_ctl_enable_bit_get(struct intel_display *display) @@ -2925,6 +3032,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, mutex_lock(&psr->lock); + if (!new_crtc_state->has_psr) + psr->no_psr_reason = new_crtc_state->no_psr_reason; + if (psr->enabled) { /* * Reasons to disable: @@ -2951,6 +3061,20 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, } } +static void +verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (!crtc_state->has_panel_replay) + return; + + drm_WARN_ON(display->drm, + intel_dsc_enabled_on_link(crtc_state) && + crtc_state->panel_replay_dsc_support == + INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED); +} + void intel_psr_post_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -2962,6 +3086,8 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state, if (!crtc_state->has_psr) return; + verify_panel_replay_dsc_state(crtc_state); + for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2973,12 +3099,19 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state, drm_WARN_ON(display->drm, psr->enabled && !crtc_state->active_planes); - keep_disabled |= psr->sink_not_reliable; - keep_disabled |= !crtc_state->active_planes; + if (psr->sink_not_reliable) + keep_disabled = true; + + if (!crtc_state->active_planes) { + psr->no_psr_reason = "All planes inactive"; + keep_disabled = true; + } /* Display WA #1136: skl, bxt */ - keep_disabled |= DISPLAY_VER(display) < 11 && - crtc_state->wm_level_disabled; + if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) { + psr->no_psr_reason = "Workaround #1136 for skl, bxt"; + keep_disabled = true; + } if (!psr->enabled && !keep_disabled) intel_psr_enable_locked(intel_dp, crtc_state); @@ -3027,7 +3160,7 @@ _psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state return true; } - return intel_de_wait_for_clear(display, + return intel_de_wait_for_clear_ms(display, EDP_PSR2_STATUS(display, cpu_transcoder), EDP_PSR2_STATUS_STATE_DEEP_SLEEP, PSR_IDLE_TIMEOUT_MS); @@ -3047,7 +3180,7 @@ _psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state return true; } - return intel_de_wait_for_clear(display, + return intel_de_wait_for_clear_ms(display, psr_status_reg(display, cpu_transcoder), EDP_PSR_STATUS_STATE_MASK, PSR_IDLE_TIMEOUT_MS); @@ -3125,7 +3258,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) mutex_unlock(&intel_dp->psr.lock); - err = intel_de_wait_for_clear(display, reg, mask, 50); + err = intel_de_wait_for_clear_ms(display, reg, mask, 50); if (err) drm_err(display->drm, "Timed out waiting for PSR Idle for re-enable\n"); @@ -3991,6 +4124,8 @@ static void intel_psr_sink_capability(struct intel_dp *intel_dp, seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support)); seq_printf(m, ", Panel Replay Selective Update = %s", str_yes_no(psr->sink_panel_replay_su_support)); + 
seq_printf(m, ", Panel Replay DSC support = %s", + panel_replay_dsc_support_str(psr->sink_panel_replay_dsc_support)); if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT) seq_printf(m, " (Early Transport)"); @@ -4025,6 +4160,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp, region_et = ""; seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et); + if (psr->no_psr_reason) + seq_printf(m, " %s\n", psr->no_psr_reason); } static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) @@ -4322,3 +4459,84 @@ bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp, { return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay; } + +void intel_psr_compute_config_late(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(intel_dp); + int vblank = intel_crtc_vblank_length(crtc_state); + int wake_lines; + + if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state)) + wake_lines = crtc_state->alpm_state.aux_less_wake_lines; + else if (intel_psr_needs_alpm(intel_dp, crtc_state)) + wake_lines = DISPLAY_VER(display) < 20 ? + psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines, + crtc_state->alpm_state.fast_wake_lines) : + crtc_state->alpm_state.io_wake_lines; + else + wake_lines = 0; + + /* + * Disable the PSR features if wake lines exceed the available vblank. + * Though SCL is computed based on these PSR features, it is not reset + * even if the PSR features are disabled to avoid changing vblank start + * at this stage. + */ + if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) { + drm_dbg_kms(display->drm, + "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n", + wake_lines); + + if (crtc_state->has_panel_replay) { + crtc_state->has_panel_replay = false; + /* + * #TODO : Add fall back to PSR/PSR2 + * Since panel replay cannot be supported, we can fall back to PSR/PSR2. + * This will require calling compute_config for psr and psr2 with check for + * actual guardband instead of vblank_length. + */ + crtc_state->has_psr = false; + } + + crtc_state->has_sel_update = false; + crtc_state->enable_psr2_su_region_et = false; + crtc_state->enable_psr2_sel_fetch = false; + } + + /* Wa_18037818876 */ + if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) { + crtc_state->has_psr = false; + drm_dbg_kms(display->drm, + "PSR disabled to workaround PSR FSM hang issue\n"); + } + + intel_psr_set_non_psr_pipes(intel_dp, crtc_state); +} + +int intel_psr_min_guardband(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + int psr_min_guardband; + int wake_lines; + + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return 0; + + if (crtc_state->has_panel_replay) + wake_lines = crtc_state->alpm_state.aux_less_wake_lines; + else if (crtc_state->has_sel_update) + wake_lines = DISPLAY_VER(display) < 20 ? 
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines, + crtc_state->alpm_state.fast_wake_lines) : + crtc_state->alpm_state.io_wake_lines; + else + return 0; + + psr_min_guardband = wake_lines + crtc_state->set_context_latency; + + if (crtc_state->req_psr2_sdp_prior_scanline) + psr_min_guardband++; + + return psr_min_guardband; +} diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 077751aa599f..620b35928832 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -77,11 +77,14 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state); void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb, struct intel_atomic_state *state, struct intel_crtc *crtc); -int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state); +int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state); void intel_psr_connector_debugfs_add(struct intel_connector *connector); void intel_psr_debugfs_register(struct intel_display *display); bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); +void intel_psr_compute_config_late(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state); +int intel_psr_min_guardband(struct intel_crtc_state *crtc_state); #endif /* __INTEL_PSR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c index 600c815e37e4..c05d4beb91d8 100644 --- a/drivers/gpu/drm/i915/display/intel_qp_tables.c +++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c @@ -5,7 +5,7 @@ #include <drm/display/drm_dsc.h> -#include "i915_utils.h" +#include "intel_display_utils.h" #include "intel_qp_tables.h" /* from BPP 6 to 24 in steps of 0.5 */ diff --git a/drivers/gpu/drm/i915/display/intel_sbi.c b/drivers/gpu/drm/i915/display/intel_sbi.c index dfcff924f0ed..b636a0060d39 100644 --- a/drivers/gpu/drm/i915/display/intel_sbi.c +++ b/drivers/gpu/drm/i915/display/intel_sbi.c @@ -21,7 +21,8 @@ static int intel_sbi_rw(struct intel_display *display, u16 reg, lockdep_assert_held(&display->sbi.lock); - if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) { + if (intel_de_wait_fw_ms(display, SBI_CTL_STAT, + SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) { drm_err(display->drm, "timeout waiting for SBI to become ready\n"); return -EBUSY; } @@ -37,7 +38,8 @@ static int intel_sbi_rw(struct intel_display *display, u16 reg, cmd |= SBI_CTL_OP_WR; intel_de_write_fw(display, SBI_CTL_STAT, cmd | SBI_STATUS_BUSY); - if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) { + if (intel_de_wait_fw_ms(display, SBI_CTL_STAT, + SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) { drm_err(display->drm, "timeout waiting for SBI to complete read\n"); return -ETIMEDOUT; } diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c index 7fe6b4a18213..a201edceee10 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c +++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c @@ -332,6 +332,8 @@ void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u6 c10_curve_1, c10_curve_2, prescaler_divider, &pll_params); + pll_state->clock = pixel_clock; + pll_state->tx = 0x10; pll_state->cmn = 0x1; pll_state->pll[0] = 
REG_FIELD_PREP(C10_PLL0_DIV5CLK_EN, pll_params.mpll_div5_en) | diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index b2dd69a11124..295030742294 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -7,12 +7,12 @@ #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_snps_hdmi_pll.h" #include "intel_snps_phy.h" #include "intel_snps_phy_regs.h" @@ -42,8 +42,8 @@ void intel_snps_phy_wait_for_calibration(struct intel_display *display) * which phy was affected and skip setup of the corresponding * output later. */ - if (intel_de_wait_for_clear(display, DG2_PHY_MISC(phy), - DG2_PHY_DP_TX_ACK_MASK, 25)) + if (intel_de_wait_for_clear_ms(display, DG2_PHY_MISC(phy), + DG2_PHY_DP_TX_ACK_MASK, 25)) display->snps.phy_failed_calibration |= BIT(phy); } } @@ -1863,7 +1863,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder, * is locked at new settings. This register bit is sampling PHY * dp_mpllb_state interface signal. */ - if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 5)) + if (intel_de_wait_for_set_ms(display, enable_reg, PLL_LOCK, 5)) drm_dbg_kms(display->drm, "Port %c PLL not locked\n", phy_name(phy)); /* @@ -1903,7 +1903,7 @@ void intel_mpllb_disable(struct intel_encoder *encoder) * 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment * (dp_txX_ack) that the new transmitter setting request is completed. */ - if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 5)) + if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_LOCK, 5)) drm_err(display->drm, "Port %c PLL not locked\n", phy_name(phy)); /* diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 75bbaa923204..69b6873a6044 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -39,10 +39,10 @@ #include <drm/drm_print.h> #include <drm/drm_rect.h> -#include "i915_utils.h" #include "i9xx_plane.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_fb.h" #include "intel_frontbuffer.h" #include "intel_plane.h" @@ -958,10 +958,9 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, static unsigned int g4x_sprite_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { - const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */ @@ -973,10 +972,9 @@ g4x_sprite_max_stride(struct intel_plane *plane, static unsigned int hsw_sprite_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { - const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. 
*/ diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index c4a5601c5107..1e21fd02685d 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -8,7 +8,6 @@ #include <drm/drm_print.h> #include "i915_reg.h" -#include "i915_utils.h" #include "intel_atomic.h" #include "intel_cx0_phy_regs.h" #include "intel_ddi.h" @@ -18,6 +17,7 @@ #include "intel_display_power_map.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dkl_phy_regs.h" #include "intel_dp.h" #include "intel_dp_mst.h" @@ -1076,8 +1076,8 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable) { /* check if mailbox is running busy */ - if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD, - TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) { + if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD, + TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) { drm_dbg_kms(display->drm, "Timeout waiting for TCSS mailbox run/busy bit to clear\n"); return; @@ -1089,8 +1089,8 @@ static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enabl TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1)); /* wait to clear mailbox running busy bit before continuing */ - if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD, - TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) { + if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD, + TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) { drm_dbg_kms(display->drm, "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n"); return; @@ -1703,6 +1703,19 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, mutex_unlock(&tc->lock); } +void intel_tc_info(struct drm_printer *p, struct intel_digital_port *dig_port) +{ + struct intel_tc_port *tc = to_tc_port(dig_port); + + intel_tc_port_lock(dig_port); + drm_printf(p, "\tTC Port %s: mode: %s, pin assignment: %c, max lanes: %d\n", + tc->port_name, + tc_port_mode_name(tc->mode), + pin_assignment_name(tc->pin_assignment), + tc->max_lane_count); + intel_tc_port_unlock(dig_port); +} + /* * The type-C ports are different because even when they are connected, they may * not be available/usable by the graphics driver: see the comment on diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index fff8b96e4972..6719aea5bd58 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -8,6 +8,7 @@ #include <linux/types.h> +struct drm_printer; struct intel_crtc_state; struct intel_digital_port; struct intel_encoder; @@ -113,4 +114,6 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port); bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port); +void intel_tc_info(struct drm_printer *p, struct intel_digital_port *dig_port); + #endif /* __INTEL_TC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index c15234c1d96e..671f357c6563 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -5,15 +5,17 @@ #include <linux/iopoll.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "i915_drv.h" -#include "i915_utils.h" #include "intel_color.h" #include "intel_crtc.h" #include "intel_de.h" +#include "intel_display_jiffies.h" #include "intel_display_regs.h" #include 
"intel_display_types.h" +#include "intel_display_utils.h" #include "intel_vblank.h" #include "intel_vrr.h" @@ -681,7 +683,7 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state, else evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state); - vblank_delay = intel_vrr_vblank_delay(crtc_state); + vblank_delay = crtc_state->set_context_latency; } else { evade->vblank_start = intel_mode_vblank_start(adjusted_mode); @@ -767,3 +769,13 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade) return scanline; } + +int intel_crtc_vblank_length(const struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + if (crtc_state->vrr.enable) + return crtc_state->vrr.guardband; + else + return adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start; +} diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h index 21fbb08d61d5..98d04cacd65f 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.h +++ b/drivers/gpu/drm/i915/display/intel_vblank.h @@ -48,4 +48,6 @@ const struct intel_crtc_state * intel_pre_commit_crtc_state(struct intel_atomic_state *state, struct intel_crtc *crtc); +int intel_crtc_vblank_length(const struct intel_crtc_state *crtc_state); + #endif /* __INTEL_VBLANK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 8e799e225af1..0e727fc5e80c 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -11,10 +11,10 @@ #include <drm/drm_fixed.h> #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dp.h" #include "intel_dsi.h" #include "intel_qp_tables.h" @@ -372,6 +372,22 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) return 0; } +void intel_dsc_enable_on_crtc(struct intel_crtc_state *crtc_state) +{ + crtc_state->dsc.compression_enabled_on_link = true; + crtc_state->dsc.compression_enable = true; +} + +bool intel_dsc_enabled_on_link(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + drm_WARN_ON(display->drm, crtc_state->dsc.compression_enable && + !crtc_state->dsc.compression_enabled_on_link); + + return crtc_state->dsc.compression_enabled_on_link; +} + enum intel_display_power_domain intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { @@ -1077,3 +1093,11 @@ int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) return min_cdclk; } + +unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->dsc.compression_enable) + return 0; + + return 0x18000; /* 1.5 */ +} diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index 9e2812f99dd7..99f64ac54b27 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -20,6 +20,8 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) void intel_dsc_enable(const struct intel_crtc_state *crtc_state); void intel_dsc_disable(const struct intel_crtc_state *crtc_state); int intel_dsc_compute_params(struct intel_crtc_state *pipe_config); +void intel_dsc_enable_on_crtc(struct intel_crtc_state *crtc_state); +bool intel_dsc_enabled_on_link(const struct intel_crtc_state *crtc_state); void 
intel_dsc_get_config(struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder); @@ -32,5 +34,6 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder, void intel_vdsc_state_dump(struct drm_printer *p, int indent, const struct intel_crtc_state *crtc_state); int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state); +unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VDSC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 3eed37f271b0..b92c42fde937 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -10,8 +10,11 @@ #include "intel_display_regs.h" #include "intel_display_types.h" #include "intel_dp.h" +#include "intel_psr.h" #include "intel_vrr.h" #include "intel_vrr_regs.h" +#include "skl_prefill.h" +#include "skl_watermark.h" #define FIXED_POINT_PRECISION 100 #define CMRR_PRECISION_TOLERANCE 10 @@ -22,6 +25,9 @@ bool intel_vrr_is_capable(struct intel_connector *connector) const struct drm_display_info *info = &connector->base.display_info; struct intel_dp *intel_dp; + if (!HAS_VRR(display)) + return false; + /* * DP Sink is capable of VRR video timings if * Ignore MSA bit is set in DPCD. @@ -46,8 +52,7 @@ bool intel_vrr_is_capable(struct intel_connector *connector) return false; } - return HAS_VRR(display) && - info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10; + return info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10; } bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh) @@ -79,44 +84,42 @@ intel_vrr_check_modeset(struct intel_atomic_state *state) } } -static int intel_vrr_real_vblank_delay(const struct intel_crtc_state *crtc_state) -{ - return crtc_state->hw.adjusted_mode.crtc_vblank_start - - crtc_state->hw.adjusted_mode.crtc_vdisplay; -} - static int intel_vrr_extra_vblank_delay(struct intel_display *display) { /* * On ICL/TGL VRR hardware inserts one extra scanline * just after vactive, which pushes the vmin decision - * boundary ahead accordingly. We'll include the extra - * scanline in our vblank delay estimates to make sure - * that we never underestimate how long we have until - * the delayed vblank has passed. + * boundary ahead accordingly, and thus reduces the + * max guardband length by one scanline. */ return DISPLAY_VER(display) < 13 ? 1 : 0; } -int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state) +static int intel_vrr_vmin_flipline_offset(struct intel_display *display) { - struct intel_display *display = to_intel_display(crtc_state); - - return intel_vrr_real_vblank_delay(crtc_state) + - intel_vrr_extra_vblank_delay(display); + /* + * ICL/TGL hardware imposes flipline>=vmin+1 + * + * We reduce the vmin value to compensate when programming the + * hardware. This approach allows flipline to remain set at the + * original value, and thus the frame will have the desired + * minimum vtotal. + */ + return DISPLAY_VER(display) < 13 ? 1 : 0; } -static int intel_vrr_flipline_offset(struct intel_display *display) +static int intel_vrr_guardband_to_pipeline_full(const struct intel_crtc_state *crtc_state, + int guardband) { - /* ICL/TGL hardware imposes flipline>=vmin+1 */ - return DISPLAY_VER(display) < 13 ? 
1 : 0; + /* hardware imposes one extra scanline somewhere */ + return guardband - crtc_state->framestart_delay - 1; } -static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state) +static int intel_vrr_pipeline_full_to_guardband(const struct intel_crtc_state *crtc_state, + int pipeline_full) { - struct intel_display *display = to_intel_display(crtc_state); - - return crtc_state->vrr.vmin + intel_vrr_flipline_offset(display); + /* hardware imposes one extra scanline somewhere */ + return pipeline_full + crtc_state->framestart_delay + 1; } /* @@ -135,48 +138,26 @@ static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state) * * framestart_delay is programmable 1-4. */ -static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state) -{ - struct intel_display *display = to_intel_display(crtc_state); - - if (DISPLAY_VER(display) >= 13) - return crtc_state->vrr.guardband; - else - /* hardware imposes one extra scanline somewhere */ - return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1; -} int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); - /* Min vblank actually determined by flipline */ - if (DISPLAY_VER(display) >= 13) - return intel_vrr_vmin_flipline(crtc_state); - else - return intel_vrr_vmin_flipline(crtc_state) + - intel_vrr_real_vblank_delay(crtc_state); + return crtc_state->vrr.vmin; } int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); - - if (DISPLAY_VER(display) >= 13) - return crtc_state->vrr.vmax; - else - return crtc_state->vrr.vmax + - intel_vrr_real_vblank_delay(crtc_state); + return crtc_state->vrr.vmax; } int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state) { - return intel_vrr_vmin_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state); + return intel_vrr_vmin_vtotal(crtc_state) - crtc_state->vrr.guardband; } int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state) { - return intel_vrr_vmax_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state); + return intel_vrr_vmax_vtotal(crtc_state) - crtc_state->vrr.guardband; } static bool @@ -230,7 +211,6 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required) static void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state) { - crtc_state->cmrr.enable = true; /* * TODO: Compute precise target refresh rate to determine * if video_mode_required should be true. Currently set to @@ -240,52 +220,76 @@ void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state) crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false); crtc_state->vrr.vmin = crtc_state->vrr.vmax; crtc_state->vrr.flipline = crtc_state->vrr.vmin; + + crtc_state->cmrr.enable = true; crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } static -void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state) +void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state, + int vmin, int vmax) { + crtc_state->vrr.vmax = vmax; + crtc_state->vrr.vmin = vmin; + crtc_state->vrr.flipline = crtc_state->vrr.vmin; + crtc_state->vrr.enable = true; crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } -/* - * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to - * Vtotal value. 
- */ static -int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state) +void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state) +{ + /* For fixed rr, vmin = vmax = flipline */ + crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal; + crtc_state->vrr.vmin = crtc_state->vrr.vmax; + crtc_state->vrr.flipline = crtc_state->vrr.vmin; +} + +static int intel_vrr_hw_value(const struct intel_crtc_state *crtc_state, + int value) { struct intel_display *display = to_intel_display(crtc_state); - int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal; + /* + * On TGL vmin/vmax/flipline also need to be + * adjusted by the SCL to maintain correct vtotals. + */ if (DISPLAY_VER(display) >= 13) - return crtc_vtotal; + return value; else - return crtc_vtotal - - intel_vrr_real_vblank_delay(crtc_state); + return value - crtc_state->set_context_latency; +} + +/* + * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to + * Vtotal value. + */ +static +int intel_vrr_fixed_rr_hw_vtotal(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_hw_value(crtc_state, crtc_state->hw.adjusted_mode.crtc_vtotal); } static -int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state) +int intel_vrr_fixed_rr_hw_vmax(const struct intel_crtc_state *crtc_state) { - return intel_vrr_fixed_rr_vtotal(crtc_state); + return intel_vrr_fixed_rr_hw_vtotal(crtc_state); } static -int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state) +int intel_vrr_fixed_rr_hw_vmin(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - return intel_vrr_fixed_rr_vtotal(crtc_state) - - intel_vrr_flipline_offset(display); + return intel_vrr_fixed_rr_hw_vtotal(crtc_state) - + intel_vrr_vmin_flipline_offset(display); } static -int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state) +int intel_vrr_fixed_rr_hw_flipline(const struct intel_crtc_state *crtc_state) { - return intel_vrr_fixed_rr_vtotal(crtc_state); + return intel_vrr_fixed_rr_hw_vtotal(crtc_state); } void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state) @@ -297,22 +301,11 @@ void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state) return; intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), - intel_vrr_fixed_rr_vmin(crtc_state) - 1); + intel_vrr_fixed_rr_hw_vmin(crtc_state) - 1); intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), - intel_vrr_fixed_rr_vmax(crtc_state) - 1); + intel_vrr_fixed_rr_hw_vmax(crtc_state) - 1); intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), - intel_vrr_fixed_rr_flipline(crtc_state) - 1); -} - -static -void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state) -{ - /* - * For fixed rr, vmin = vmax = flipline. - * vmin is already set to crtc_vtotal set vmax and flipline the same. 
- */ - crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal; - crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal; + intel_vrr_fixed_rr_hw_flipline(crtc_state) - 1); } static @@ -384,60 +377,131 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, vmax = vmin; } - crtc_state->vrr.vmin = vmin; - crtc_state->vrr.vmax = vmax; - - crtc_state->vrr.flipline = crtc_state->vrr.vmin; - if (crtc_state->uapi.vrr_enabled && vmin < vmax) - intel_vrr_compute_vrr_timings(crtc_state); + intel_vrr_compute_vrr_timings(crtc_state, vmin, vmax); else if (is_cmrr_frac_required(crtc_state) && is_edp) intel_vrr_compute_cmrr_timings(crtc_state); else intel_vrr_compute_fixed_rr_timings(crtc_state); - /* - * flipline determines the min vblank length the hardware will - * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce - * vmin by one to make sure we can get the actual min vblank length. - */ - crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display); - if (HAS_AS_SDP(display)) { crtc_state->vrr.vsync_start = (crtc_state->hw.adjusted_mode.crtc_vtotal - - crtc_state->hw.adjusted_mode.vsync_start); + crtc_state->hw.adjusted_mode.crtc_vsync_start); crtc_state->vrr.vsync_end = (crtc_state->hw.adjusted_mode.crtc_vtotal - - crtc_state->hw.adjusted_mode.vsync_end); + crtc_state->hw.adjusted_mode.crtc_vsync_end); } } -void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state) +static int +intel_vrr_max_hw_guardband(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + int max_pipeline_full = REG_FIELD_MAX(VRR_CTL_PIPELINE_FULL_MASK); + + if (DISPLAY_VER(display) >= 13) + return REG_FIELD_MAX(XELPD_VRR_CTL_VRR_GUARDBAND_MASK); + else + return intel_vrr_pipeline_full_to_guardband(crtc_state, + max_pipeline_full); +} + +static int +intel_vrr_max_vblank_guardband(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + return crtc_state->vrr.vmin - + adjusted_mode->crtc_vdisplay - + crtc_state->set_context_latency - + intel_vrr_extra_vblank_delay(display); +} + +static int +intel_vrr_max_guardband(struct intel_crtc_state *crtc_state) +{ + return min(intel_vrr_max_hw_guardband(crtc_state), + intel_vrr_max_vblank_guardband(crtc_state)); +} + +static +int intel_vrr_compute_optimized_guardband(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct skl_prefill_ctx prefill_ctx; + int prefill_latency_us; + int guardband = 0; + + skl_prefill_init_worst(&prefill_ctx, crtc_state); + + /* + * The SoC power controller runs SAGV mutually exclusive with package C states, + * so the max of package C and SAGV latencies is used to compute the min prefill guardband. 
+ * PM delay = max(sagv_latency, pkgc_max_latency (highest enabled wm level 1 and up)) + */ + prefill_latency_us = max(display->sagv.block_time_us, + skl_watermark_max_latency(display, 1)); + + guardband = skl_prefill_min_guardband(&prefill_ctx, + crtc_state, + prefill_latency_us); + + if (intel_crtc_has_dp_encoder(crtc_state)) { + guardband = max(guardband, intel_psr_min_guardband(crtc_state)); + guardband = max(guardband, intel_dp_sdp_min_guardband(crtc_state, true)); + } + + return guardband; +} + +static bool intel_vrr_use_optimized_guardband(const struct intel_crtc_state *crtc_state) +{ + /* + * #TODO: Enable optimized guardband for HDMI + * For HDMI a lot of infoframes are transmitted a line or two after vsync. + * Since with the optimized guardband the double buffering point is at the delayed vblank, + * we need to ensure that vsync happens after the delayed vblank for the HDMI case. + */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return false; + + return true; +} + +void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; + int guardband; + if (!intel_vrr_possible(crtc_state)) return; - if (DISPLAY_VER(display) >= 13) { - crtc_state->vrr.guardband = - crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start; - } else { - /* hardware imposes one extra scanline somewhere */ - crtc_state->vrr.pipeline_full = - min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start - - crtc_state->framestart_delay - 1); + if (intel_vrr_use_optimized_guardband(crtc_state)) + guardband = intel_vrr_compute_optimized_guardband(crtc_state); + else + guardband = crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay; + + crtc_state->vrr.guardband = min(guardband, intel_vrr_max_guardband(crtc_state)); + if (intel_vrr_always_use_vrr_tg(display)) { + adjusted_mode->crtc_vblank_start = + adjusted_mode->crtc_vtotal - crtc_state->vrr.guardband; /* - * vmin/vmax/flipline also need to be adjusted by - * the vblank delay to maintain correct vtotals. + * pipe_mode has already been derived from the + * original adjusted_mode, keep the two in sync.
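+ * (crtc_vblank_start now marks the delayed vblank start, i.e. crtc_vtotal - guardband.)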
*/ - crtc_state->vrr.vmin -= intel_vrr_real_vblank_delay(crtc_state); - crtc_state->vrr.vmax -= intel_vrr_real_vblank_delay(crtc_state); - crtc_state->vrr.flipline -= intel_vrr_real_vblank_delay(crtc_state); + pipe_mode->crtc_vblank_start = + adjusted_mode->crtc_vblank_start; } + + if (DISPLAY_VER(display) < 13) + crtc_state->vrr.pipeline_full = + intel_vrr_guardband_to_pipeline_full(crtc_state, + crtc_state->vrr.guardband); } static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state) @@ -461,6 +525,9 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + if (!HAS_VRR(display)) + return; + /* * This bit seems to have two meanings depending on the platform: * TGL: generate VRR "safe window" for DSB vblank waits @@ -489,7 +556,7 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) intel_vrr_set_fixed_rr_timings(crtc_state); - if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable) + if (!intel_vrr_always_use_vrr_tg(display)) intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), trans_vrr_ctl(crtc_state)); @@ -498,6 +565,18 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) TRANS_VRR_VSYNC(display, cpu_transcoder), VRR_VSYNC_END(crtc_state->vrr.vsync_end) | VRR_VSYNC_START(crtc_state->vrr.vsync_start)); + + /* + * For BMG and LNL+ onwards the EMP_AS_SDP_TL is used for programming + * double buffering point and transmission line for VRR packets for + * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON. + * Since we currently support VRR only for DP/eDP, this is programmed + * for the Adaptive Sync SDP at vsync start. + */ + if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20) + intel_de_write(display, + EMP_AS_SDP_TL(display, cpu_transcoder), + EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start)); } void intel_vrr_send_push(struct intel_dsb *dsb, @@ -576,126 +655,128 @@ bool intel_vrr_always_use_vrr_tg(struct intel_display *display) return false; } -static -void intel_vrr_set_db_point_and_transmission_line(const struct intel_crtc_state *crtc_state) +static int intel_vrr_hw_vmin(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - /* - * For BMG and LNL+ onwards the EMP_AS_SDP_TL is used for programming - * double buffering point and transmission line for VRR packets for - * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON. - * Since currently we support VRR only for DP/eDP, so this is programmed - * to for Adaptive Sync SDP to Vsync start.
- */ - if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20) - intel_de_write(display, - EMP_AS_SDP_TL(display, cpu_transcoder), - EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start)); + return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmin) - + intel_vrr_vmin_flipline_offset(display); } -void intel_vrr_enable(const struct intel_crtc_state *crtc_state) +static int intel_vrr_hw_vmax(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmax); +} + +static int intel_vrr_hw_flipline(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_hw_value(crtc_state, crtc_state->vrr.flipline); +} + +static void intel_vrr_set_vrr_timings(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (!crtc_state->vrr.enable) - return; - intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), - crtc_state->vrr.vmin - 1); + intel_vrr_hw_vmin(crtc_state) - 1); intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), - crtc_state->vrr.vmax - 1); + intel_vrr_hw_vmax(crtc_state) - 1); intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), - crtc_state->vrr.flipline - 1); + intel_vrr_hw_flipline(crtc_state) - 1); +} - intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), - TRANS_PUSH_EN); +static void intel_vrr_tg_enable(const struct intel_crtc_state *crtc_state, + bool cmrr_enable) +{ + struct intel_display *display = to_intel_display(crtc_state); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 vrr_ctl; - if (!intel_vrr_always_use_vrr_tg(display)) { - intel_vrr_set_db_point_and_transmission_line(crtc_state); + intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN); - if (crtc_state->cmrr.enable) { - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), - VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE | - trans_vrr_ctl(crtc_state)); - } else { - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), - VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); - } - } + vrr_ctl = VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state); + + /* + * FIXME this might be broken as bspec seems to imply that + * even VRR_CTL_CMRR_ENABLE is armed by TRANS_CMRR_N_HI + * when enabling CMRR (but not when disabling CMRR?). 
+ */ + if (cmrr_enable) + vrr_ctl |= VRR_CTL_CMRR_ENABLE; + + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), vrr_ctl); } -void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) +static void intel_vrr_tg_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_display *display = to_intel_display(old_crtc_state); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; - if (!old_crtc_state->vrr.enable) - return; + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), + trans_vrr_ctl(old_crtc_state)); - if (!intel_vrr_always_use_vrr_tg(display)) { - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), - trans_vrr_ctl(old_crtc_state)); - intel_de_wait_for_clear(display, - TRANS_VRR_STATUS(display, cpu_transcoder), - VRR_STATUS_VRR_EN_LIVE, 1000); - intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0); - } + if (intel_de_wait_for_clear_ms(display, + TRANS_VRR_STATUS(display, cpu_transcoder), + VRR_STATUS_VRR_EN_LIVE, 1000)) + drm_err(display->drm, "Timed out waiting for VRR live status to clear\n"); - intel_vrr_set_fixed_rr_timings(old_crtc_state); + intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0); } -void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state) +void intel_vrr_enable(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (!HAS_VRR(display)) + if (!crtc_state->vrr.enable) return; - if (!intel_vrr_possible(crtc_state)) - return; + intel_vrr_set_vrr_timings(crtc_state); - if (!intel_vrr_always_use_vrr_tg(display)) { - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), - trans_vrr_ctl(crtc_state)); - return; - } + if (!intel_vrr_always_use_vrr_tg(display)) + intel_vrr_tg_enable(crtc_state, crtc_state->cmrr.enable); +} - intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), - TRANS_PUSH_EN); +void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) +{ + struct intel_display *display = to_intel_display(old_crtc_state); - intel_vrr_set_db_point_and_transmission_line(crtc_state); + if (!old_crtc_state->vrr.enable) + return; - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), - VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); + if (!intel_vrr_always_use_vrr_tg(display)) + intel_vrr_tg_disable(old_crtc_state); + + intel_vrr_set_fixed_rr_timings(old_crtc_state); } -void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state) +void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - - if (!HAS_VRR(display)) - return; if (!intel_vrr_possible(crtc_state)) return; - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0); + if (intel_vrr_always_use_vrr_tg(display)) + intel_vrr_tg_enable(crtc_state, false); +} - intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder), - VRR_STATUS_VRR_EN_LIVE, 1000); - intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0); +void intel_vrr_transcoder_disable(const struct intel_crtc_state *old_crtc_state) +{ + struct intel_display *display = to_intel_display(old_crtc_state); + + if (!intel_vrr_possible(old_crtc_state)) + return; + + if (intel_vrr_always_use_vrr_tg(display)) + intel_vrr_tg_disable(old_crtc_state); } bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state) 
{ return crtc_state->vrr.flipline && crtc_state->vrr.flipline == crtc_state->vrr.vmax && - crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state); + crtc_state->vrr.flipline == crtc_state->vrr.vmin; } void intel_vrr_get_config(struct intel_crtc_state *crtc_state) @@ -720,14 +801,20 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) TRANS_CMRR_M_HI(display, cpu_transcoder)); } - if (DISPLAY_VER(display) >= 13) + if (DISPLAY_VER(display) >= 13) { crtc_state->vrr.guardband = REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl); - else - if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) + } else { + if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) { crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl); + crtc_state->vrr.guardband = + intel_vrr_pipeline_full_to_guardband(crtc_state, + crtc_state->vrr.pipeline_full); + } + } + if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) { crtc_state->vrr.flipline = intel_de_read(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1; @@ -736,6 +823,15 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) crtc_state->vrr.vmin = intel_de_read(display, TRANS_VRR_VMIN(display, cpu_transcoder)) + 1; + if (DISPLAY_VER(display) < 13) { + /* undo what intel_vrr_hw_value() does when writing the values */ + crtc_state->vrr.flipline += crtc_state->set_context_latency; + crtc_state->vrr.vmax += crtc_state->set_context_latency; + crtc_state->vrr.vmin += crtc_state->set_context_latency; + + crtc_state->vrr.vmin += intel_vrr_vmin_flipline_offset(display); + } + /* * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal * bits are not filled. Since for these platforms TRAN_VMIN is always @@ -771,4 +867,34 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) */ if (crtc_state->vrr.enable) crtc_state->mode_flags |= I915_MODE_FLAG_VRR; + + /* + * For platforms that always use the VRR timing generator, we overwrite + * crtc_vblank_start with vtotal - guardband to reflect the delayed + * vblank start. This works for both default and optimized guardband values. + * On other platforms, we keep the original value from + * intel_get_transcoder_timings() and apply adjustments only in VRR-specific + * paths as needed. 
+ */ + if (intel_vrr_always_use_vrr_tg(display)) + crtc_state->hw.adjusted_mode.crtc_vblank_start = + crtc_state->hw.adjusted_mode.crtc_vtotal - + crtc_state->vrr.guardband; +} + +int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (DISPLAY_VER(display) >= 30) + return crtc_state->hw.adjusted_mode.crtc_vdisplay - + crtc_state->set_context_latency; + else + return crtc_state->hw.adjusted_mode.crtc_vdisplay; +} + +int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_vmin_vblank_start(crtc_state) - + crtc_state->set_context_latency; } diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h index 38bf9996b883..bc9044621635 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.h +++ b/drivers/gpu/drm/i915/display/intel_vrr.h @@ -21,7 +21,7 @@ bool intel_vrr_possible(const struct intel_crtc_state *crtc_state); void intel_vrr_check_modeset(struct intel_atomic_state *state); void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state); -void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state); +void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state); void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state); void intel_vrr_enable(const struct intel_crtc_state *crtc_state); void intel_vrr_send_push(struct intel_dsb *dsb, @@ -35,11 +35,12 @@ int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state); int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state); int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state); int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state); -int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state); bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state); void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state); void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state); void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state); bool intel_vrr_always_use_vrr_tg(struct intel_display *display); +int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state); +int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VRR_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_prefill.c b/drivers/gpu/drm/i915/display/skl_prefill.c new file mode 100644 index 000000000000..4707c2e7127a --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_prefill.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <linux/debugfs.h> + +#include <drm/drm_print.h> + +#include "intel_cdclk.h" +#include "intel_display_core.h" +#include "intel_display_types.h" +#include "intel_vblank.h" +#include "intel_vdsc.h" +#include "skl_prefill.h" +#include "skl_scaler.h" +#include "skl_watermark.h" + +static unsigned int prefill_usecs_to_lines(const struct intel_crtc_state *crtc_state, + unsigned int usecs) +{ + const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; + + return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, usecs << 16), + pipe_mode->crtc_htotal * 1000); +} + +static void prefill_init(struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state) +{ + memset(ctx, 0, sizeof(*ctx)); + + ctx->prefill.fixed = crtc_state->framestart_delay << 16; 
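+	/* prefill terms are tracked as .16 fixed-point scanlines, hence the << 16 */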
+ + /* 20 usec for translation walks/etc. */ + ctx->prefill.fixed += prefill_usecs_to_lines(crtc_state, 20); + + ctx->prefill.dsc = intel_vdsc_prefill_lines(crtc_state); +} + +static void prefill_init_nocdclk_worst(struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state) +{ + prefill_init(ctx, crtc_state); + + ctx->prefill.wm0 = skl_wm0_prefill_lines_worst(crtc_state); + ctx->prefill.scaler_1st = skl_scaler_1st_prefill_lines_worst(crtc_state); + ctx->prefill.scaler_2nd = skl_scaler_2nd_prefill_lines_worst(crtc_state); + + ctx->adj.scaler_1st = skl_scaler_1st_prefill_adjustment_worst(crtc_state); + ctx->adj.scaler_2nd = skl_scaler_2nd_prefill_adjustment_worst(crtc_state); +} + +static void prefill_init_nocdclk(struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state) +{ + prefill_init(ctx, crtc_state); + + ctx->prefill.wm0 = skl_wm0_prefill_lines(crtc_state); + ctx->prefill.scaler_1st = skl_scaler_1st_prefill_lines(crtc_state); + ctx->prefill.scaler_2nd = skl_scaler_2nd_prefill_lines(crtc_state); + + ctx->adj.scaler_1st = skl_scaler_1st_prefill_adjustment(crtc_state); + ctx->adj.scaler_2nd = skl_scaler_2nd_prefill_adjustment(crtc_state); +} + +static unsigned int prefill_adjust(unsigned int value, unsigned int factor) +{ + return DIV_ROUND_UP_ULL(mul_u32_u32(value, factor), 0x10000); +} + +static unsigned int prefill_lines_nocdclk(const struct skl_prefill_ctx *ctx) +{ + unsigned int prefill = 0; + + prefill += ctx->prefill.dsc; + prefill = prefill_adjust(prefill, ctx->adj.scaler_2nd); + + prefill += ctx->prefill.scaler_2nd; + prefill = prefill_adjust(prefill, ctx->adj.scaler_1st); + + prefill += ctx->prefill.scaler_1st; + prefill += ctx->prefill.wm0; + + return prefill; +} + +static unsigned int prefill_lines_cdclk(const struct skl_prefill_ctx *ctx) +{ + return prefill_adjust(prefill_lines_nocdclk(ctx), ctx->adj.cdclk); +} + +static unsigned int prefill_lines_full(const struct skl_prefill_ctx *ctx) +{ + return ctx->prefill.fixed + prefill_lines_cdclk(ctx); +} + +void skl_prefill_init_worst(struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state) +{ + prefill_init_nocdclk_worst(ctx, crtc_state); + + ctx->adj.cdclk = intel_cdclk_prefill_adjustment_worst(crtc_state); + + ctx->prefill.full = prefill_lines_full(ctx); +} + +void skl_prefill_init(struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state) +{ + prefill_init_nocdclk(ctx, crtc_state); + + ctx->adj.cdclk = intel_cdclk_prefill_adjustment(crtc_state); + + ctx->prefill.full = prefill_lines_full(ctx); +} + +static unsigned int prefill_lines_with_latency(const struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state, + unsigned int latency_us) +{ + return ctx->prefill.full + prefill_usecs_to_lines(crtc_state, latency_us); +} + +int skl_prefill_min_guardband(const struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state, + unsigned int latency_us) +{ + unsigned int prefill = prefill_lines_with_latency(ctx, crtc_state, latency_us); + + return DIV_ROUND_UP(prefill, 0x10000); +} + +static unsigned int prefill_guardband(const struct intel_crtc_state *crtc_state) +{ + return intel_crtc_vblank_length(crtc_state) << 16; +} + +bool skl_prefill_vblank_too_short(const struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state, + unsigned int latency_us) +{ + unsigned int guardband = prefill_guardband(crtc_state); + unsigned int prefill = prefill_lines_with_latency(ctx, crtc_state, latency_us); + + return guardband < prefill; +} + +int 
skl_prefill_min_cdclk(const struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state) +{ + unsigned int prefill_unadjusted = prefill_lines_nocdclk(ctx); + unsigned int prefill_available = prefill_guardband(crtc_state) - ctx->prefill.fixed; + + return intel_cdclk_min_cdclk_for_prefill(crtc_state, prefill_unadjusted, + prefill_available); +} diff --git a/drivers/gpu/drm/i915/display/skl_prefill.h b/drivers/gpu/drm/i915/display/skl_prefill.h new file mode 100644 index 000000000000..028ee19b64ce --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_prefill.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef __SKL_PREFILL_H__ +#define __SKL_PREFILL_H__ + +#include <linux/types.h> + +struct intel_crtc_state; + +struct skl_prefill_ctx { + /* .16 scanlines */ + struct { + unsigned int fixed; + unsigned int wm0; + unsigned int scaler_1st; + unsigned int scaler_2nd; + unsigned int dsc; + unsigned int full; + } prefill; + + /* .16 adjustment factors */ + struct { + unsigned int cdclk; + unsigned int scaler_1st; + unsigned int scaler_2nd; + } adj; +}; + +void skl_prefill_init_worst(struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state); +void skl_prefill_init(struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state); + +bool skl_prefill_vblank_too_short(const struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state, + unsigned int latency_us); +int skl_prefill_min_guardband(const struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state, + unsigned int latency_us); +int skl_prefill_min_cdclk(const struct skl_prefill_ctx *ctx, + const struct intel_crtc_state *crtc_state); + +#endif /* __SKL_PREFILL_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index c6cccf170ff1..4c4deac7f9c8 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -5,11 +5,13 @@ #include <drm/drm_print.h> -#include "i915_utils.h" +#include "intel_casf.h" +#include "intel_casf_regs.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_display_wa.h" #include "intel_fb.h" #include "skl_scaler.h" @@ -282,7 +284,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) drm_rect_width(&crtc_state->pipe_src), drm_rect_height(&crtc_state->pipe_src), width, height, NULL, 0, - crtc_state->pch_pfit.enabled); + crtc_state->pch_pfit.enabled || + intel_casf_needs_scaler(crtc_state)); } /** @@ -321,7 +324,9 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, } static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state, - struct intel_crtc *crtc) + struct intel_crtc *crtc, + struct intel_plane_state *plane_state, + bool casf_scaler) { int i; @@ -329,6 +334,10 @@ static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state, if (scaler_state->scalers[i].in_use) continue; + /* CASF needs second scaler */ + if (!plane_state && casf_scaler && i != 1) + continue; + scaler_state->scalers[i].in_use = true; return i; @@ -379,7 +388,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state, int num_scalers_need, struct intel_crtc *crtc, const char *name, int idx, struct intel_plane_state *plane_state, - int *scaler_id) + int *scaler_id, bool casf_scaler) { struct intel_display *display = to_intel_display(crtc); 
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; @@ -388,7 +397,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state, int vscale = 0; if (*scaler_id < 0) - *scaler_id = intel_allocate_scaler(scaler_state, crtc); + *scaler_id = intel_allocate_scaler(scaler_state, crtc, plane_state, casf_scaler); if (drm_WARN(display->drm, *scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx)) @@ -520,10 +529,14 @@ static int setup_crtc_scaler(struct intel_atomic_state *state, struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; + if (intel_casf_needs_scaler(crtc_state) && crtc_state->pch_pfit.enabled) + return -EINVAL; + return intel_atomic_setup_scaler(crtc_state, hweight32(scaler_state->scaler_users), crtc, "CRTC", crtc->base.base.id, - NULL, &scaler_state->scaler_id); + NULL, &scaler_state->scaler_id, + intel_casf_needs_scaler(crtc_state)); } static int setup_plane_scaler(struct intel_atomic_state *state, @@ -558,7 +571,8 @@ static int setup_plane_scaler(struct intel_atomic_state *state, return intel_atomic_setup_scaler(crtc_state, hweight32(scaler_state->scaler_users), crtc, "PLANE", plane->base.base.id, - plane_state, &plane_state->scaler_id); + plane_state, &plane_state->scaler_id, + false); } /** @@ -738,6 +752,52 @@ static void skl_scaler_setup_filter(struct intel_display *display, } } +#define CASF_SCALER_FILTER_SELECT \ + (PS_FILTER_PROGRAMMED | \ + PS_Y_VERT_FILTER_SELECT(0) | \ + PS_Y_HORZ_FILTER_SELECT(0) | \ + PS_UV_VERT_FILTER_SELECT(0) | \ + PS_UV_HORZ_FILTER_SELECT(0)) + +void skl_scaler_setup_casf(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_display *display = to_intel_display(crtc); + struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + struct drm_rect src, dest; + int id, width, height; + int x = 0, y = 0; + enum pipe pipe = crtc->pipe; + u32 ps_ctrl; + + width = adjusted_mode->crtc_hdisplay; + height = adjusted_mode->crtc_vdisplay; + + drm_rect_init(&dest, x, y, width, height); + + width = drm_rect_width(&dest); + height = drm_rect_height(&dest); + id = scaler_state->scaler_id; + + drm_rect_init(&src, 0, 0, + drm_rect_width(&crtc_state->pipe_src) << 16, + drm_rect_height(&crtc_state->pipe_src) << 16); + + trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height); + + ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode | + CASF_SCALER_FILTER_SELECT; + + intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl); + intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id), + PS_WIN_XPOS(x) | PS_WIN_YPOS(y)); + intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id), + PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height)); +} + void skl_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); @@ -921,16 +981,23 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state) continue; id = i; - crtc_state->pch_pfit.enabled = true; + + /* Read CASF regs for second scaler */ + if (HAS_CASF(display) && id == 1) + intel_casf_sharpness_get_config(crtc_state); + + if (!crtc_state->hw.casf_params.casf_enable) + crtc_state->pch_pfit.enabled = true; pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i)); size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i)); - drm_rect_init(&crtc_state->pch_pfit.dst, - REG_FIELD_GET(PS_WIN_XPOS_MASK, pos), - 
REG_FIELD_GET(PS_WIN_YPOS_MASK, pos), - REG_FIELD_GET(PS_WIN_XSIZE_MASK, size), - REG_FIELD_GET(PS_WIN_YSIZE_MASK, size)); + if (!crtc_state->hw.casf_params.casf_enable) + drm_rect_init(&crtc_state->pch_pfit.dst, + REG_FIELD_GET(PS_WIN_XPOS_MASK, pos), + REG_FIELD_GET(PS_WIN_YPOS_MASK, pos), + REG_FIELD_GET(PS_WIN_XSIZE_MASK, size), + REG_FIELD_GET(PS_WIN_YSIZE_MASK, size)); scaler_state->scalers[i].in_use = true; break; @@ -968,3 +1035,144 @@ void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state) 1); intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, 0); } + +unsigned int skl_scaler_1st_prefill_adjustment(const struct intel_crtc_state *crtc_state) +{ + /* + * FIXME don't have scalers assigned yet + * so can't look up the scale factors + */ + return 0x10000; +} + +unsigned int skl_scaler_2nd_prefill_adjustment(const struct intel_crtc_state *crtc_state) +{ + /* + * FIXME don't have scalers assigned yet + * so can't look up the scale factors + */ + return 0x10000; +} + +unsigned int skl_scaler_1st_prefill_lines(const struct intel_crtc_state *crtc_state) +{ + const struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + int num_scalers = hweight32(scaler_state->scaler_users); + + if (num_scalers > 0) + return 4 << 16; + + return 0; +} + +unsigned int skl_scaler_2nd_prefill_lines(const struct intel_crtc_state *crtc_state) +{ + const struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + int num_scalers = hweight32(scaler_state->scaler_users); + + if (num_scalers > 1 && crtc_state->pch_pfit.enabled) + return 4 << 16; + + return 0; +} + +static unsigned int _skl_scaler_max_scale(const struct intel_crtc_state *crtc_state, + unsigned int max_scale) +{ + struct intel_display *display = to_intel_display(crtc_state); + + /* + * Downscaling requires increasing cdclk, so max scale + * factor is limited to the max_dotclock/dotclock ratio. 
+ * + * FIXME find out the max downscale factors properly + */ + return min(max_scale, DIV_ROUND_UP_ULL((u64)display->cdclk.max_dotclk_freq << 16, + crtc_state->hw.pipe_mode.crtc_clock)); +} + +unsigned int skl_scaler_max_total_scale(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + unsigned int max_scale; + + if (crtc->num_scalers < 1) + return 0x10000; + + /* FIXME find out the max downscale factors properly */ + max_scale = 9 << 16; + if (crtc->num_scalers > 1) + max_scale *= 9; + + return _skl_scaler_max_scale(crtc_state, max_scale); +} + +unsigned int skl_scaler_max_hscale(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + unsigned int max_scale; + + if (crtc->num_scalers < 1) + return 0x10000; + + /* FIXME find out the max downscale factors properly */ + max_scale = 3 << 16; + + return _skl_scaler_max_scale(crtc_state, max_scale); +} + +unsigned int skl_scaler_max_scale(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + unsigned int max_scale; + + if (crtc->num_scalers < 1) + return 0x10000; + + /* FIXME find out the max downscale factors properly */ + max_scale = 9 << 16; + + return _skl_scaler_max_scale(crtc_state, max_scale); +} + +unsigned int skl_scaler_1st_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc->num_scalers > 0) + return skl_scaler_max_scale(crtc_state); + else + return 0x10000; +} + +unsigned int skl_scaler_2nd_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc->num_scalers > 1) + return skl_scaler_max_scale(crtc_state); + else + return 0x10000; +} + +unsigned int skl_scaler_1st_prefill_lines_worst(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc->num_scalers > 0) + return 4 << 16; + else + return 0; +} + +unsigned int skl_scaler_2nd_prefill_lines_worst(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc->num_scalers > 1) + return 4 << 16; + else + return 0; +} diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h index 12a19016c5f6..7e8d819c019d 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.h +++ b/drivers/gpu/drm/i915/display/skl_scaler.h @@ -36,6 +36,8 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); void skl_scaler_get_config(struct intel_crtc_state *crtc_state); +void skl_scaler_setup_casf(struct intel_crtc_state *crtc_state); + enum drm_mode_status skl_scaler_mode_valid(struct intel_display *display, const struct drm_display_mode *mode, @@ -45,4 +47,19 @@ skl_scaler_mode_valid(struct intel_display *display, void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state); void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state); + +unsigned int skl_scaler_max_total_scale(const struct intel_crtc_state *crtc_state); +unsigned int skl_scaler_max_scale(const struct intel_crtc_state *crtc_state); +unsigned int skl_scaler_max_hscale(const struct intel_crtc_state *crtc_state); + +unsigned int skl_scaler_1st_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state); +unsigned int skl_scaler_2nd_prefill_adjustment_worst(const struct 
intel_crtc_state *crtc_state); +unsigned int skl_scaler_1st_prefill_lines_worst(const struct intel_crtc_state *crtc_state); +unsigned int skl_scaler_2nd_prefill_lines_worst(const struct intel_crtc_state *crtc_state); + +unsigned int skl_scaler_1st_prefill_adjustment(const struct intel_crtc_state *crtc_state); +unsigned int skl_scaler_2nd_prefill_adjustment(const struct intel_crtc_state *crtc_state); +unsigned int skl_scaler_1st_prefill_lines(const struct intel_crtc_state *crtc_state); +unsigned int skl_scaler_2nd_prefill_lines(const struct intel_crtc_state *crtc_state); + #endif diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index e13fb781e7b2..89c8003ccfe7 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -7,15 +7,15 @@ #include <drm/drm_blend.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include "pxp/intel_pxp.h" -#include "i915_drv.h" -#include "i915_utils.h" #include "intel_bo.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dpt.h" #include "intel_fb.h" #include "intel_fbc.h" @@ -24,6 +24,7 @@ #include "intel_plane.h" #include "intel_psr.h" #include "intel_psr_regs.h" +#include "intel_step.h" #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_universal_plane_regs.h" @@ -389,44 +390,19 @@ static int glk_plane_max_width(const struct drm_framebuffer *fb, } } +static int adl_plane_min_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + return 16 / fb->format->cpp[color_plane]; +} + static int icl_plane_min_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { /* Wa_14011264657, Wa_14011050563: gen11+ */ - switch (fb->format->format) { - case DRM_FORMAT_C8: - return 18; - case DRM_FORMAT_RGB565: - return 10; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - return 6; - case DRM_FORMAT_NV12: - return 20; - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - return 12; - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ARGB16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - return 4; - default: - return 1; - } + return 16 / fb->format->cpp[color_plane] + 2; } static int xe3_plane_max_width(const struct drm_framebuffer *fb, @@ -463,6 +439,23 @@ static int skl_plane_max_height(const struct drm_framebuffer *fb, return 4096; } +static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) +{ + return pipe - PIPE_A + INTEL_FBC_A; +} + +static bool skl_plane_has_fbc(struct intel_display *display, + enum intel_fbc_id fbc_id, enum plane_id plane_id) +{ + if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0) + return false; + + if (DISPLAY_VER(display) >= 20) + return icl_is_hdr_plane(display, plane_id); + else + return plane_id == PLANE_1; +} + static int icl_plane_max_height(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) @@ -472,12 +465,11 @@ static int icl_plane_max_height(const struct 
drm_framebuffer *fb, static unsigned int plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation, + const struct drm_format_info *info, + u64 modifier, unsigned int rotation, unsigned int max_pixels, unsigned int max_bytes) { - const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; if (drm_rotation_90_or_270(rotation)) @@ -488,26 +480,26 @@ plane_max_stride(struct intel_plane *plane, static unsigned int adl_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { unsigned int max_pixels = 65536; /* PLANE_OFFSET limit */ unsigned int max_bytes = 128 * 1024; - return plane_max_stride(plane, pixel_format, + return plane_max_stride(plane, info, modifier, rotation, max_pixels, max_bytes); } static unsigned int skl_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) + const struct drm_format_info *info, + u64 modifier, unsigned int rotation) { unsigned int max_pixels = 8192; /* PLANE_OFFSET limit */ unsigned int max_bytes = 32 * 1024; - return plane_max_stride(plane, pixel_format, + return plane_max_stride(plane, info, modifier, rotation, max_pixels, max_bytes); } @@ -898,6 +890,25 @@ static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb, intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0); } +static void x3p_lpd_plane_update_pixel_normalizer(struct intel_dsb *dsb, + struct intel_plane *plane, + bool enable) +{ + struct intel_display *display = to_intel_display(plane); + enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(plane->pipe); + u32 val; + + /* Only HDR planes have a pixel normalizer, and it does not matter without FBC */ + if (!skl_plane_has_fbc(display, fbc_id, plane->id)) + return; + + val = enable ? PLANE_PIXEL_NORMALIZE_NORM_FACTOR(PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0) | + PLANE_PIXEL_NORMALIZE_ENABLE : 0; + + intel_de_write_dsb(display, dsb, + PLANE_PIXEL_NORMALIZE(plane->pipe, plane->id), val); +} + static void icl_plane_disable_arm(struct intel_dsb *dsb, struct intel_plane *plane, @@ -913,6 +924,10 @@ icl_plane_disable_arm(struct intel_dsb *dsb, skl_write_plane_wm(dsb, plane, crtc_state); icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state); + + if (DISPLAY_VER(display) >= 35) + x3p_lpd_plane_update_pixel_normalizer(dsb, plane, false); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0); intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0); } @@ -1572,7 +1587,7 @@ icl_plane_update_noarm(struct intel_dsb *dsb, } /* FLAT CCS doesn't need to program AUX_DIST */ - if (!HAS_FLAT_CCS(to_i915(display->drm)) && DISPLAY_VER(display) < 20) + if (HAS_AUX_CCS(display)) intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id), skl_plane_aux_dist(plane_state, color_plane)); @@ -1643,6 +1658,14 @@ icl_plane_update_arm(struct intel_dsb *dsb, icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state); /* + * In order to have FBC for fp16 formats the pixel normalizer block must be + * active. Check if the pixel normalizer block needs to be enabled for FBC. + * If needed, use a normalization factor of 1.0 and enable the block. + */ + if (intel_fbc_is_enable_pixel_normalizer(plane_state)) + x3p_lpd_plane_update_pixel_normalizer(dsb, plane, true); + + /* * The control register self-arms if the plane was previously * disabled.
Try to make the plane enable atomic by writing * the control register just before the surface register. @@ -1724,7 +1747,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, } if (rotation & DRM_MODE_REFLECT_X && - fb->modifier == DRM_FORMAT_MOD_LINEAR) { + fb->modifier == DRM_FORMAT_MOD_LINEAR && + DISPLAY_VER(display) < 35) { drm_dbg_kms(display->drm, "[PLANE:%d:%s] horizontal flip is not supported with linear surface formats\n", plane->base.base.id, plane->base.name); @@ -1780,8 +1804,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, } /* Y-tiling is not supported in IF-ID Interlace mode */ - if (crtc_state->hw.enable && - crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && fb->modifier != DRM_FORMAT_MOD_LINEAR && fb->modifier != I915_FORMAT_MOD_X_TILED) { drm_dbg_kms(display->drm, @@ -1884,6 +1907,14 @@ static int intel_plane_min_width(struct intel_plane *plane, return 1; } +static int intel_plane_min_height(struct intel_plane *plane, + const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + return 1; +} + static int intel_plane_max_width(struct intel_plane *plane, const struct drm_framebuffer *fb, int color_plane, @@ -2015,6 +2046,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) int w = drm_rect_width(&plane_state->uapi.src) >> 16; int h = drm_rect_height(&plane_state->uapi.src) >> 16; int min_width = intel_plane_min_width(plane, fb, 0, rotation); + int min_height = intel_plane_min_height(plane, fb, 0, rotation); int max_width = intel_plane_max_width(plane, fb, 0, rotation); int max_height = intel_plane_max_height(plane, fb, 0, rotation); unsigned int alignment = plane->min_alignment(plane, fb, 0); @@ -2022,11 +2054,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) u32 offset; int ret; - if (w > max_width || w < min_width || h > max_height || h < 1) { + if (w > max_width || w < min_width || h > max_height || h < min_height) { drm_dbg_kms(display->drm, - "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", + "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx%d max: %dx%d)\n", plane->base.base.id, plane->base.name, - w, h, min_width, max_width, max_height); + w, h, min_width, min_height, max_width, max_height); return -EINVAL; } @@ -2086,6 +2118,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) int uv_plane = 1; int ccs_plane = intel_fb_is_ccs_modifier(fb->modifier) ? 
skl_main_to_aux_plane(fb, uv_plane) : 0; + int min_width = intel_plane_min_width(plane, fb, uv_plane, rotation); + int min_height = intel_plane_min_height(plane, fb, uv_plane, rotation); int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation); int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation); int x = plane_state->uapi.src.x1 >> 17; @@ -2095,11 +2129,11 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) u32 offset; /* FIXME not quite sure how/if these apply to the chroma plane */ - if (w > max_width || h > max_height) { + if (w > max_width || w < min_width || h > max_height || h < min_height) { drm_dbg_kms(display->drm, - "[PLANE:%d:%s] CbCr source size %dx%d too big (limit %dx%d)\n", + "[PLANE:%d:%s] requested CbCr source size %dx%d outside limits (min: %dx%d max: %dx%d)\n", plane->base.base.id, plane->base.name, - w, h, max_width, max_height); + w, h, min_width, min_height, max_width, max_height); return -EINVAL; } @@ -2404,23 +2438,6 @@ void icl_link_nv12_planes(struct intel_plane_state *uv_plane_state, } } -static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) -{ - return pipe - PIPE_A + INTEL_FBC_A; -} - -static bool skl_plane_has_fbc(struct intel_display *display, - enum intel_fbc_id fbc_id, enum plane_id plane_id) -{ - if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0) - return false; - - if (DISPLAY_VER(display) >= 20) - return icl_is_hdr_plane(display, plane_id); - else - return plane_id == PLANE_1; -} - static struct intel_fbc *skl_plane_fbc(struct intel_display *display, enum pipe pipe, enum plane_id plane_id) { @@ -2439,13 +2456,10 @@ static bool skl_plane_has_planar(struct intel_display *display, if (display->platform.skylake || display->platform.broxton) return false; - if (DISPLAY_VER(display) == 9 && pipe == PIPE_C) + if (pipe == PIPE_C) return false; - if (plane_id != PLANE_1 && plane_id != PLANE_2) - return false; - - return true; + return plane_id == PLANE_1 || plane_id == PLANE_2; } static const u32 *skl_get_plane_formats(struct intel_display *display, @@ -2461,11 +2475,17 @@ static const u32 *skl_get_plane_formats(struct intel_display *display, } } +static bool glk_plane_has_planar(struct intel_display *display, + enum pipe pipe, enum plane_id plane_id) +{ + return plane_id == PLANE_1 || plane_id == PLANE_2; +} + static const u32 *glk_get_plane_formats(struct intel_display *display, enum pipe pipe, enum plane_id plane_id, int *num_formats) { - if (skl_plane_has_planar(display, pipe, plane_id)) { + if (glk_plane_has_planar(display, pipe, plane_id)) { *num_formats = ARRAY_SIZE(glk_planar_formats); return glk_planar_formats; } else { @@ -2705,8 +2725,10 @@ skl_plane_disable_flip_done(struct intel_plane *plane) static bool skl_plane_has_rc_ccs(struct intel_display *display, enum pipe pipe, enum plane_id plane_id) { - return pipe != PIPE_C && - (plane_id == PLANE_1 || plane_id == PLANE_2); + if (pipe == PIPE_C) + return false; + + return plane_id == PLANE_1 || plane_id == PLANE_2; } static u8 skl_plane_caps(struct intel_display *display, @@ -2834,11 +2856,15 @@ skl_universal_plane_create(struct intel_display *display, intel_fbc_add_plane(skl_plane_fbc(display, pipe, plane_id), plane); if (DISPLAY_VER(display) >= 30) { + plane->min_width = adl_plane_min_width; plane->max_width = xe3_plane_max_width; plane->max_height = icl_plane_max_height; plane->min_cdclk = icl_plane_min_cdclk; } else if (DISPLAY_VER(display) >= 11) { - plane->min_width = icl_plane_min_width; + if (DISPLAY_VER(display) >= 
14 || display->platform.alderlake_p) + plane->min_width = adl_plane_min_width; + else + plane->min_width = icl_plane_min_width; if (icl_is_hdr_plane(display, plane_id)) plane->max_width = icl_hdr_plane_max_width; else @@ -2930,7 +2956,7 @@ skl_universal_plane_create(struct intel_display *display, caps = skl_plane_caps(display, pipe, plane_id); /* FIXME: xe has problems with AUX */ - if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(to_i915(display->drm))) + if (!IS_ENABLED(I915) && HAS_AUX_CCS(display)) caps &= ~(INTEL_PLANE_CAP_CCS_RC | INTEL_PLANE_CAP_CCS_RC_CC | INTEL_PLANE_CAP_CCS_MC); @@ -3057,7 +3083,6 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, fourcc = skl_format_to_fourcc(pixel_format, val & PLANE_CTL_ORDER_RGBX, alpha); - fb->format = drm_format_info(fourcc); tiling = val & PLANE_CTL_TILED_MASK; switch (tiling) { @@ -3065,11 +3090,9 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, fb->modifier = DRM_FORMAT_MOD_LINEAR; break; case PLANE_CTL_TILED_X: - plane_config->tiling = I915_TILING_X; fb->modifier = I915_FORMAT_MOD_X_TILED; break; case PLANE_CTL_TILED_Y: - plane_config->tiling = I915_TILING_Y; if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) if (DISPLAY_VER(display) >= 14) fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS; @@ -3110,6 +3133,8 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, goto error; } + fb->format = drm_get_format_info(display->drm, fourcc, fb->modifier); + if (!display->params.enable_dpt && intel_fb_modifier_uses_dpt(display, fb->modifier)) { drm_dbg_kms(display->drm, "DPT disabled, skipping initial FB\n"); diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h index ca9fdfbbe57c..6f815b231340 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h +++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h @@ -324,7 +324,7 @@ #define PLANE_WM_IGNORE_LINES REG_BIT(30) #define PLANE_WM_AUTO_MIN_ALLOC_EN REG_BIT(29) #define PLANE_WM_LINES_MASK REG_GENMASK(26, 14) -#define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0) +#define PLANE_WM_BLOCKS_MASK REG_GENMASK(12, 0) #define _PLANE_WM_SAGV_1_A 0x70258 #define _PLANE_WM_SAGV_1_B 0x71258 @@ -375,10 +375,10 @@ _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B, \ _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B) -/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */ -#define PLANE_BUF_END_MASK REG_GENMASK(27, 16) +/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits, xe3p_lpd 13 bits */ +#define PLANE_BUF_END_MASK REG_GENMASK(28, 16) #define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end)) -#define PLANE_BUF_START_MASK REG_GENMASK(11, 0) +#define PLANE_BUF_START_MASK REG_GENMASK(12, 0) #define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start)) #define _PLANE_MIN_BUF_CFG_1_A 0x70274 @@ -389,9 +389,9 @@ _PLANE_MIN_BUF_CFG_1_A, _PLANE_MIN_BUF_CFG_1_B, \ _PLANE_MIN_BUF_CFG_2_A, _PLANE_MIN_BUF_CFG_2_B) #define PLANE_AUTO_MIN_DBUF_EN REG_BIT(31) -#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(27, 16) +#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(28, 16) #define PLANE_MIN_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_MIN_DBUF_BLOCKS_MASK, (val)) -#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(11, 0) +#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(12, 0) #define PLANE_INTERIM_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_INTERIM_DBUF_BLOCKS_MASK, (val)) /* tgl+ */ @@ -455,4 +455,16 @@ _SEL_FETCH_PLANE_OFFSET_5_A, _SEL_FETCH_PLANE_OFFSET_5_B, \ _SEL_FETCH_PLANE_OFFSET_6_A, _SEL_FETCH_PLANE_OFFSET_6_B) +#define 
_PLANE_PIXEL_NORMALIZE_1_A 0x701a8 +#define _PLANE_PIXEL_NORMALIZE_2_A 0x702a8 +#define _PLANE_PIXEL_NORMALIZE_1_B 0x711a8 +#define _PLANE_PIXEL_NORMALIZE_2_B 0x712a8 +#define PLANE_PIXEL_NORMALIZE(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \ + _PLANE_PIXEL_NORMALIZE_1_A, _PLANE_PIXEL_NORMALIZE_1_B, \ + _PLANE_PIXEL_NORMALIZE_2_A, _PLANE_PIXEL_NORMALIZE_2_B) +#define PLANE_PIXEL_NORMALIZE_ENABLE REG_BIT(31) +#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR_MASK REG_GENMASK(15, 0) +#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR(val) REG_FIELD_PREP(PLANE_PIXEL_NORMALIZE_NORM_FACTOR_MASK, (val)) +#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0 0x3c00 + #endif /* __SKL_UNIVERSAL_PLANE_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index d74cbb43ae6f..54e9e0be019d 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -10,7 +10,6 @@ #include "soc/intel_dram.h" #include "i915_reg.h" -#include "i915_utils.h" #include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_bw.h" @@ -23,12 +22,16 @@ #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_utils.h" #include "intel_fb.h" #include "intel_fixed.h" #include "intel_flipq.h" #include "intel_pcode.h" #include "intel_plane.h" +#include "intel_vblank.h" #include "intel_wm.h" +#include "skl_prefill.h" +#include "skl_scaler.h" #include "skl_universal_plane_regs.h" #include "skl_watermark.h" #include "skl_watermark_regs.h" @@ -632,15 +635,22 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, { struct intel_display *display = to_intel_display(crtc_state); struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); + const struct drm_mode_config *mode_config = &display->drm->mode_config; + const struct drm_format_info *info; struct skl_wm_level wm = {}; int ret, min_ddb_alloc = 0; struct skl_wm_params wp; + u64 modifier; + u32 format; int level; - ret = skl_compute_wm_params(crtc_state, 256, - drm_format_info(DRM_FORMAT_ARGB8888), - DRM_FORMAT_MOD_LINEAR, - DRM_MODE_ROTATE_0, + format = DRM_FORMAT_ARGB8888; + modifier = DRM_FORMAT_MOD_LINEAR; + + info = drm_get_format_info(display->drm, format, modifier); + + ret = skl_compute_wm_params(crtc_state, mode_config->cursor_width, + info, modifier, DRM_MODE_ROTATE_0, crtc_state->pixel_rate, &wp, 0, 0); drm_WARN_ON(display->drm, ret); @@ -1636,26 +1646,11 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, return ret; } -static uint_fixed_16_16_t -intel_get_linetime_us(const struct intel_crtc_state *crtc_state) +static int skl_wm_linetime_us(const struct intel_crtc_state *crtc_state, + int pixel_rate) { - struct intel_display *display = to_intel_display(crtc_state); - u32 pixel_rate; - u32 crtc_htotal; - uint_fixed_16_16_t linetime_us; - - if (!crtc_state->hw.active) - return u32_to_fixed16(0); - - pixel_rate = crtc_state->pixel_rate; - - if (drm_WARN_ON(display->drm, pixel_rate == 0)) - return u32_to_fixed16(0); - - crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal; - linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); - - return linetime_us; + return DIV_ROUND_UP(crtc_state->hw.pipe_mode.crtc_htotal * 1000, + pixel_rate); } static int @@ -1743,7 +1738,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, wp->plane_blocks_per_line); - wp->linetime_us = 
fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state)); + wp->linetime_us = skl_wm_linetime_us(crtc_state, plane_pixel_rate); return 0; } @@ -1824,6 +1819,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, if (wp->y_tiled) { selected_result = max_fixed16(method2, wp->y_tile_minimum); + } else if (DISPLAY_VER(display) >= 35) { + selected_result = method2; } else { if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal / wp->dbuf_block_size < 1) && @@ -1878,18 +1875,21 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } else { blocks++; } - - /* - * Make sure result blocks for higher latency levels are - * at least as high as level below the current level. - * Assumption in DDB algorithm optimization for special - * cases. Also covers Display WA #1125 for RC. - */ - if (result_prev->blocks > blocks) - blocks = result_prev->blocks; } } + /* + * Make sure result blocks for higher latency levels are + * at least as high as level below the current level. + * Assumption in DDB algorithm optimization for special + * cases. Also covers Display WA #1125 for RC. + * + * Let's always do this as the algorithm can give non + * monotonic results on any platform. + */ + blocks = max_t(u32, blocks, result_prev->blocks); + lines = max_t(u32, lines, result_prev->lines); + if (DISPLAY_VER(display) >= 11) { if (wp->y_tiled) { int extra_lines; @@ -2157,103 +2157,55 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, return 0; } -static int -cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state) +unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->uapi.state); - const struct intel_cdclk_state *cdclk_state; - - cdclk_state = intel_atomic_get_cdclk_state(state); - if (IS_ERR(cdclk_state)) { - drm_WARN_ON(display->drm, PTR_ERR(cdclk_state)); - return 1; - } - - return min(1, DIV_ROUND_UP(crtc_state->pixel_rate, - 2 * intel_cdclk_logical(cdclk_state))); -} - -static int -dsc_prefill_latency(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - const struct intel_crtc_scaler_state *scaler_state = - &crtc_state->scaler_state; - int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal, - crtc_state->hw.adjusted_mode.clock); - int num_scaler_users = hweight32(scaler_state->scaler_users); - int chroma_downscaling_factor = - crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 
2 : 1; - u32 dsc_prefill_latency = 0; - - if (!crtc_state->dsc.compression_enable || - !num_scaler_users || - num_scaler_users > crtc->num_scalers) - return dsc_prefill_latency; - - dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10); - - for (int i = 0; i < num_scaler_users; i++) { - u64 hscale_k, vscale_k; - - hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16); - vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16); - dsc_prefill_latency = DIV_ROUND_UP_ULL(dsc_prefill_latency * hscale_k * vscale_k, - 1000000); - } - - dsc_prefill_latency *= cdclk_prefill_adjustment(crtc_state); + struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->primary); + const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; + int ret, pixel_rate, width, level = 0; + const struct drm_format_info *info; + struct skl_wm_level wm = {}; + struct skl_wm_params wp; + unsigned int latency; + u64 modifier; + u32 format; - return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, dsc_prefill_latency); -} + /* only expected to be used for VRR guardband calculation */ + drm_WARN_ON(display->drm, !HAS_VRR(display)); -static int -scaler_prefill_latency(const struct intel_crtc_state *crtc_state) -{ - const struct intel_crtc_scaler_state *scaler_state = - &crtc_state->scaler_state; - int num_scaler_users = hweight32(scaler_state->scaler_users); - int scaler_prefill_latency = 0; - int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal, - crtc_state->hw.adjusted_mode.clock); + /* FIXME rather ugly to pick this by hand but maybe no better way? */ + format = DRM_FORMAT_XBGR16161616F; + if (HAS_4TILE(display)) + modifier = I915_FORMAT_MOD_4_TILED; + else + modifier = I915_FORMAT_MOD_Y_TILED; - if (!num_scaler_users) - return scaler_prefill_latency; + info = drm_get_format_info(display->drm, format, modifier); - scaler_prefill_latency = 4 * linetime; + pixel_rate = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_total_scale(crtc_state), + pipe_mode->crtc_clock), + 0x10000); - if (num_scaler_users > 1) { - u64 hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16); - u64 vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16); - int chroma_downscaling_factor = - crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1; - int latency; + /* FIXME limit to max plane width? */ + width = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_hscale(crtc_state), + pipe_mode->crtc_hdisplay), + 0x10000); - latency = DIV_ROUND_UP_ULL((4 * linetime * hscale_k * vscale_k * - chroma_downscaling_factor), 1000000); - scaler_prefill_latency += latency; - } + /* FIXME is 90/270 rotation worse than 0/180? */ + ret = skl_compute_wm_params(crtc_state, width, info, + modifier, DRM_MODE_ROTATE_0, + pixel_rate, &wp, 0, 1); + drm_WARN_ON(display->drm, ret); - scaler_prefill_latency *= cdclk_prefill_adjustment(crtc_state); + latency = skl_wm_latency(display, level, &wp); - return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, scaler_prefill_latency); -} + skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); -static bool -skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state, - int wm0_lines, int latency) -{ - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; + /* FIXME is this sane? 
*/ + if (wm.min_ddb_alloc == U16_MAX) + wm.lines = skl_wm_max_lines(display); - return crtc_state->framestart_delay + - intel_usecs_to_scanlines(adjusted_mode, latency) + - scaler_prefill_latency(crtc_state) + - dsc_prefill_latency(crtc_state) + - wm0_lines > - adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start; + return wm.lines << 16; } static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state) @@ -2272,15 +2224,21 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state) return wm0_lines; } +unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state) +{ + return skl_max_wm0_lines(crtc_state) << 16; +} + /* * TODO: In case we use PKG_C_LATENCY to allow C-states when the delayed vblank * size is too small for the package C exit latency we need to notify PSR about * the scenario to apply Wa_16025596647. */ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, - int wm0_lines) + const struct skl_prefill_ctx *ctx) { struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); int level; for (level = display->wm.num_levels - 1; level >= 0; level--) { @@ -2295,10 +2253,13 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, if (level == 0) latency = 0; - if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency)) + if (!skl_prefill_vblank_too_short(ctx, crtc_state, latency)) return level; } + drm_dbg_kms(display->drm, "[CRTC:%d:%s] Not enough time in vblank for prefill\n", + crtc->base.base.id, crtc->base.name); + return -EINVAL; } @@ -2306,14 +2267,15 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - int wm0_lines, level; + struct skl_prefill_ctx ctx; + int level; if (!crtc_state->hw.active) return 0; - wm0_lines = skl_max_wm0_lines(crtc_state); + skl_prefill_init(&ctx, crtc_state); - level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines); + level = skl_max_wm_level_for_vblank(crtc_state, &ctx); if (level < 0) return level; @@ -2323,6 +2285,13 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) */ crtc_state->wm_level_disabled = level < display->wm.num_levels - 1; + /* + * TODO: assert that we are in fact using the maximum guardband + * if we end up disabling any WM levels here. Otherwise we clearly + * failed in using a realistic worst case prefill estimate when + * determining the guardband size. 
+ */ + for (level++; level < display->wm.num_levels; level++) { enum plane_id plane_id; @@ -2341,8 +2310,8 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) if (DISPLAY_VER(display) >= 12 && display->sagv.block_time_us && - skl_is_vblank_too_short(crtc_state, wm0_lines, - display->sagv.block_time_us)) { + skl_prefill_vblank_too_short(&ctx, crtc_state, + display->sagv.block_time_us)) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) { @@ -3174,12 +3143,60 @@ void skl_watermark_ipc_init(struct intel_display *display) skl_watermark_ipc_update(display); } -static void -adjust_wm_latency(struct intel_display *display, - u16 wm[], int num_levels, int read_latency) +static void multiply_wm_latency(struct intel_display *display, int mult) +{ + u16 *wm = display->wm.skl_latency; + int level, num_levels = display->wm.num_levels; + + for (level = 0; level < num_levels; level++) + wm[level] *= mult; +} + +static void increase_wm_latency(struct intel_display *display, int inc) +{ + u16 *wm = display->wm.skl_latency; + int level, num_levels = display->wm.num_levels; + + wm[0] += inc; + + for (level = 1; level < num_levels; level++) { + if (wm[level] == 0) + break; + + wm[level] += inc; + } +} + +static bool need_16gb_dimm_wa(struct intel_display *display) { const struct dram_info *dram_info = intel_dram_info(display->drm); - int i, level; + + return (display->platform.skylake || display->platform.kabylake || + display->platform.coffeelake || display->platform.cometlake || + DISPLAY_VER(display) == 11) && dram_info->has_16gb_dimms; +} + +static int wm_read_latency(struct intel_display *display) +{ + if (DISPLAY_VER(display) >= 14) + return 6; + else if (DISPLAY_VER(display) >= 12) + return 3; + else + return 2; +} + +static void sanitize_wm_latency(struct intel_display *display) +{ + u16 *wm = display->wm.skl_latency; + int level, num_levels = display->wm.num_levels; + + /* + * Xe3p and beyond should ignore level 0's reported latency and + * always apply WaWmMemoryReadLatency logic. + */ + if (DISPLAY_VER(display) >= 35) + wm[0] = 0; /* * If a level n (n > 1) has a 0us latency, all levels m (m >= n) @@ -3187,14 +3204,38 @@ adjust_wm_latency(struct intel_display *display, * of the punit to satisfy this requirement. */ for (level = 1; level < num_levels; level++) { - if (wm[level] == 0) { - for (i = level + 1; i < num_levels; i++) - wm[i] = 0; + if (wm[level] == 0) + break; + } - num_levels = level; + for (level = level + 1; level < num_levels; level++) + wm[level] = 0; +} + +static void make_wm_latency_monotonic(struct intel_display *display) +{ + u16 *wm = display->wm.skl_latency; + int level, num_levels = display->wm.num_levels; + + for (level = 1; level < num_levels; level++) { + if (wm[level] == 0) break; - } + + wm[level] = max(wm[level], wm[level-1]); } +} + +static void +adjust_wm_latency(struct intel_display *display) +{ + u16 *wm = display->wm.skl_latency; + + if (display->platform.dg2) + multiply_wm_latency(display, 2); + + sanitize_wm_latency(display); + + make_wm_latency_monotonic(display); /* * WaWmMemoryReadLatency @@ -3203,24 +3244,22 @@ adjust_wm_latency(struct intel_display *display, * to add proper adjustment to each valid level we retrieve * from the punit when level 0 response data is 0us. 
*/ - if (wm[0] == 0) { - for (level = 0; level < num_levels; level++) - wm[level] += read_latency; - } + if (wm[0] == 0) + increase_wm_latency(display, wm_read_latency(display)); /* - * WA Level-0 adjustment for 16Gb DIMMs: SKL+ + * WA Level-0 adjustment for 16Gb+ DIMMs: SKL+ * If we could not get dimm info enable this WA to prevent from - * any underrun. If not able to get DIMM info assume 16Gb DIMM + * any underrun. If not able to get DIMM info assume 16Gb+ DIMM * to avoid any underrun. */ - if (!display->platform.dg2 && dram_info->has_16gb_dimms) - wm[0] += 1; + if (need_16gb_dimm_wa(display)) + increase_wm_latency(display, 1); } -static void mtl_read_wm_latency(struct intel_display *display, u16 wm[]) +static void mtl_read_wm_latency(struct intel_display *display) { - int num_levels = display->wm.num_levels; + u16 *wm = display->wm.skl_latency; u32 val; val = intel_de_read(display, MTL_LATENCY_LP0_LP1); @@ -3234,15 +3273,11 @@ static void mtl_read_wm_latency(struct intel_display *display, u16 wm[]) val = intel_de_read(display, MTL_LATENCY_LP4_LP5); wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); - - adjust_wm_latency(display, wm, num_levels, 6); } -static void skl_read_wm_latency(struct intel_display *display, u16 wm[]) +static void skl_read_wm_latency(struct intel_display *display) { - int num_levels = display->wm.num_levels; - int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2; - int mult = display->platform.dg2 ? 2 : 1; + u16 *wm = display->wm.skl_latency; u32 val; int ret; @@ -3254,10 +3289,10 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[]) return; } - wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult; - wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult; - wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; - wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; + wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val); + wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val); + wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val); + wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val); /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ @@ -3267,12 +3302,10 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[]) return; } - wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult; - wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult; - wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; - wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; - - adjust_wm_latency(display, wm, num_levels, read_latency); + wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val); + wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val); + wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val); + wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val); } static void skl_setup_wm_latency(struct intel_display *display) @@ -3283,11 +3316,15 @@ static void skl_setup_wm_latency(struct intel_display *display) display->wm.num_levels = 8; if (DISPLAY_VER(display) >= 14) - mtl_read_wm_latency(display, display->wm.skl_latency); + mtl_read_wm_latency(display); else - skl_read_wm_latency(display, display->wm.skl_latency); + skl_read_wm_latency(display); - intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency); + intel_print_wm_latency(display, "original", 
display->wm.skl_latency); + + adjust_wm_latency(display); + + intel_print_wm_latency(display, "adjusted", display->wm.skl_latency); } static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) @@ -3456,7 +3493,10 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display, if (!HAS_MBUS_JOINING(display)) return; - if (DISPLAY_VER(display) >= 20) + if (DISPLAY_VER(display) >= 35) + intel_de_rmw(display, MBUS_CTL, XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK, + XE3P_MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1)); + else if (DISPLAY_VER(display) >= 20) intel_de_rmw(display, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK, MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1)); @@ -3467,9 +3507,14 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display, ratio, str_yes_no(joined_mbus)); for_each_dbuf_slice(display, slice) - intel_de_rmw(display, DBUF_CTL_S(slice), - DBUF_MIN_TRACKER_STATE_SERVICE_MASK, - DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1)); + if (DISPLAY_VER(display) >= 35) + intel_de_rmw(display, DBUF_CTL_S(slice), + XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK, + XE3P_DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1)); + else + intel_de_rmw(display, DBUF_CTL_S(slice), + DBUF_MIN_TRACKER_STATE_SERVICE_MASK, + DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1)); } static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state) diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index 62790816f030..6bc2ec9164bf 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -79,5 +79,8 @@ void intel_program_dpkgc_latency(struct intel_atomic_state *state); bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state); +unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state); +unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state); + #endif /* __SKL_WATERMARK_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h index c5572fc0e847..abf56ac31105 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h +++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h @@ -32,16 +32,18 @@ #define MBUS_BBOX_CTL_S1 _MMIO(0x45040) #define MBUS_BBOX_CTL_S2 _MMIO(0x45044) -#define MBUS_CTL _MMIO(0x4438C) -#define MBUS_JOIN REG_BIT(31) -#define MBUS_HASHING_MODE_MASK REG_BIT(30) -#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0) -#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1) -#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26) -#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe) -#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7) -#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13) -#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val) +#define MBUS_CTL _MMIO(0x4438C) +#define MBUS_JOIN REG_BIT(31) +#define MBUS_HASHING_MODE_MASK REG_BIT(30) +#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0) +#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1) +#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26) +#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe) +#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7) +#define XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(16, 13) +#define 
XE3P_MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK, val) +#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13) +#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val) /* * The below are numbered starting from "S1" on gen11/gen12, but starting @@ -51,20 +53,22 @@ * way things will be named by the hardware team going forward, plus it's more * consistent with how most of the rest of our registers are named. */ -#define _DBUF_CTL_S0 0x45008 -#define _DBUF_CTL_S1 0x44FE8 -#define _DBUF_CTL_S2 0x44300 -#define _DBUF_CTL_S3 0x44304 -#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \ - _DBUF_CTL_S0, \ - _DBUF_CTL_S1, \ - _DBUF_CTL_S2, \ - _DBUF_CTL_S3)) -#define DBUF_POWER_REQUEST REG_BIT(31) -#define DBUF_POWER_STATE REG_BIT(30) -#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19) -#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x) -#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */ +#define _DBUF_CTL_S0 0x45008 +#define _DBUF_CTL_S1 0x44FE8 +#define _DBUF_CTL_S2 0x44300 +#define _DBUF_CTL_S3 0x44304 +#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \ + _DBUF_CTL_S0, \ + _DBUF_CTL_S1, \ + _DBUF_CTL_S2, \ + _DBUF_CTL_S3)) +#define DBUF_POWER_REQUEST REG_BIT(31) +#define DBUF_POWER_STATE REG_BIT(30) +#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19) +#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x) +#define XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(20, 16) +#define XE3P_DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) +#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */ #define DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) /* ADL-P+ */ #define MTL_LATENCY_LP0_LP1 _MMIO(0x45780) diff --git a/drivers/gpu/drm/i915/display/vlv_clock.c b/drivers/gpu/drm/i915/display/vlv_clock.c new file mode 100644 index 000000000000..1abdae453514 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_clock.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include <drm/drm_print.h> + +#include "intel_display_core.h" +#include "intel_display_types.h" +#include "vlv_clock.h" +#include "vlv_sideband.h" + +/* + * FIXME: The caching of hpll_freq and czclk_freq relies on the first calls + * occurring at a time when they can actually be read. This appears to be the + * case, but is somewhat fragile. Make the initialization explicit at a point + * where they can be reliably read. 
+ */ + +/* returns HPLL frequency in kHz */ +int vlv_clock_get_hpll_vco(struct drm_device *drm) +{ + struct intel_display *display = to_intel_display(drm); + int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; + + if (!display->vlv_clock.hpll_freq) { + vlv_cck_get(drm); + /* Obtain SKU information */ + hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) & + CCK_FUSE_HPLL_FREQ_MASK; + vlv_cck_put(drm); + + display->vlv_clock.hpll_freq = vco_freq[hpll_freq] * 1000; + + drm_dbg_kms(drm, "HPLL frequency: %d kHz\n", display->vlv_clock.hpll_freq); + } + + return display->vlv_clock.hpll_freq; +} + +static int vlv_clock_get_cck(struct drm_device *drm, + const char *name, u32 reg, int ref_freq) +{ + u32 val; + int divider; + + vlv_cck_get(drm); + val = vlv_cck_read(drm, reg); + vlv_cck_put(drm); + + divider = val & CCK_FREQUENCY_VALUES; + + drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) != + (divider << CCK_FREQUENCY_STATUS_SHIFT), + "%s change in progress\n", name); + + return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); +} + +int vlv_clock_get_hrawclk(struct drm_device *drm) +{ + /* RAWCLK_FREQ_VLV register updated from power well code */ + return vlv_clock_get_cck(drm, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL, + vlv_clock_get_hpll_vco(drm)); +} + +int vlv_clock_get_czclk(struct drm_device *drm) +{ + struct intel_display *display = to_intel_display(drm); + + if (!display->vlv_clock.czclk_freq) { + display->vlv_clock.czclk_freq = vlv_clock_get_cck(drm, "czclk", CCK_CZ_CLOCK_CONTROL, + vlv_clock_get_hpll_vco(drm)); + drm_dbg_kms(drm, "CZ clock rate: %d kHz\n", display->vlv_clock.czclk_freq); + } + + return display->vlv_clock.czclk_freq; +} + +int vlv_clock_get_cdclk(struct drm_device *drm) +{ + return vlv_clock_get_cck(drm, "cdclk", CCK_DISPLAY_CLOCK_CONTROL, + vlv_clock_get_hpll_vco(drm)); +} + +int vlv_clock_get_gpll(struct drm_device *drm) +{ + return vlv_clock_get_cck(drm, "GPLL ref", CCK_GPLL_CLOCK_CONTROL, + vlv_clock_get_czclk(drm)); +} diff --git a/drivers/gpu/drm/i915/display/vlv_clock.h b/drivers/gpu/drm/i915/display/vlv_clock.h new file mode 100644 index 000000000000..5742ed3c628d --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_clock.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __VLV_CLOCK_H__ +#define __VLV_CLOCK_H__ + +struct drm_device; + +#ifdef I915 +int vlv_clock_get_hpll_vco(struct drm_device *drm); +int vlv_clock_get_hrawclk(struct drm_device *drm); +int vlv_clock_get_czclk(struct drm_device *drm); +int vlv_clock_get_cdclk(struct drm_device *drm); +int vlv_clock_get_gpll(struct drm_device *drm); +#else +static inline int vlv_clock_get_hpll_vco(struct drm_device *drm) +{ + return 0; +} +static inline int vlv_clock_get_hrawclk(struct drm_device *drm) +{ + return 0; +} +static inline int vlv_clock_get_czclk(struct drm_device *drm) +{ + return 0; +} +static inline int vlv_clock_get_cdclk(struct drm_device *drm) +{ + return 0; +} +static inline int vlv_clock_get_gpll(struct drm_device *drm) +{ + return 0; +} +#endif + +#endif /* __VLV_CLOCK_H__ */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index c9a53fde79c4..19bdd8662359 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -34,7 +34,6 @@ #include <drm/drm_probe_helper.h> #include "i915_reg.h" -#include "i915_utils.h" #include "intel_atomic.h" #include "intel_backlight.h" #include "intel_connector.h" @@ -42,6 +41,7 @@ #include "intel_de.h" #include "intel_display_regs.h" #include 
"intel_display_types.h" +#include "intel_display_utils.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "intel_fifo_underrun.h" @@ -94,8 +94,8 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; - if (intel_de_wait_for_set(display, MIPI_GEN_FIFO_STAT(display, port), - mask, 100)) + if (intel_de_wait_for_set_ms(display, MIPI_GEN_FIFO_STAT(display, port), + mask, 100)) drm_err(display->drm, "DPI FIFOs are not empty\n"); } @@ -162,8 +162,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* note: this is never true for reads */ if (packet.payload_length) { - if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port), - data_mask, 50)) + if (intel_de_wait_for_clear_ms(display, MIPI_GEN_FIFO_STAT(display, port), + data_mask, 50)) drm_err(display->drm, "Timeout waiting for HS/LP DATA FIFO !full\n"); @@ -176,8 +176,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, GEN_READ_DATA_AVAIL); } - if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port), - ctrl_mask, 50)) { + if (intel_de_wait_for_clear_ms(display, MIPI_GEN_FIFO_STAT(display, port), + ctrl_mask, 50)) { drm_err(display->drm, "Timeout waiting for HS/LP CTRL FIFO !full\n"); } @@ -188,8 +188,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* ->rx_len is set only for reads */ if (msg->rx_len) { data_mask = GEN_READ_DATA_AVAIL; - if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), - data_mask, 50)) + if (intel_de_wait_for_set_ms(display, MIPI_INTR_STAT(display, port), + data_mask, 50)) drm_err(display->drm, "Timeout waiting for read data.\n"); @@ -246,7 +246,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, intel_de_write(display, MIPI_DPI_CONTROL(display, port), cmd); mask = SPL_PKT_SENT_INTERRUPT; - if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), mask, 100)) + if (intel_de_wait_for_set_ms(display, MIPI_INTR_STAT(display, port), mask, 100)) drm_err(display->drm, "Video mode command 0x%08x send failed.\n", cmd); @@ -352,8 +352,8 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) /* Wait for Pwr ACK */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_set(display, MIPI_CTRL(display, port), - GLK_MIPIIO_PORT_POWERED, 20)) + if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port), + GLK_MIPIIO_PORT_POWERED, 20)) drm_err(display->drm, "MIPIO port is powergated\n"); } @@ -374,8 +374,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to set */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_set(display, MIPI_CTRL(display, port), - GLK_PHY_STATUS_PORT_READY, 20)) + if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port), + GLK_PHY_STATUS_PORT_READY, 20)) drm_err(display->drm, "PHY is not ON\n"); } @@ -394,8 +394,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for ULPS active */ - if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port), - GLK_ULPS_NOT_ACTIVE, 20)) + if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port), + GLK_ULPS_NOT_ACTIVE, 20)) drm_err(display->drm, "ULPS not active\n"); /* Exit ULPS */ @@ -413,16 +413,16 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) /* Wait for Stop state */ for_each_dsi_port(port, intel_dsi->ports) { - if 
(intel_de_wait_for_set(display, MIPI_CTRL(display, port), - GLK_DATA_LANE_STOP_STATE, 20)) + if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port), + GLK_DATA_LANE_STOP_STATE, 20)) drm_err(display->drm, "Date lane not in STOP state\n"); } /* Wait for AFE LATCH */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_set(display, BXT_MIPI_PORT_CTRL(port), - AFE_LATCHOUT, 20)) + if (intel_de_wait_for_set_ms(display, BXT_MIPI_PORT_CTRL(port), + AFE_LATCHOUT, 20)) drm_err(display->drm, "D-PHY not entering LP-11 state\n"); } @@ -519,15 +519,15 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port), - GLK_PHY_STATUS_PORT_READY, 20)) + if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port), + GLK_PHY_STATUS_PORT_READY, 20)) drm_err(display->drm, "PHY is not turning OFF\n"); } /* Wait for Pwr ACK bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port), - GLK_MIPIIO_PORT_POWERED, 20)) + if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port), + GLK_MIPIIO_PORT_POWERED, 20)) drm_err(display->drm, "MIPI IO Port is not powergated\n"); } @@ -544,8 +544,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port), - GLK_PHY_STATUS_PORT_READY, 20)) + if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port), + GLK_PHY_STATUS_PORT_READY, 20)) drm_err(display->drm, "PHY is not turning OFF\n"); } @@ -595,8 +595,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) * Port A only. MIPI Port C has no similar bit for checking. */ if ((display->platform.broxton || port == PORT_A) && - intel_de_wait_for_clear(display, port_ctrl, - AFE_LATCHOUT, 30)) + intel_de_wait_for_clear_ms(display, port_ctrl, + AFE_LATCHOUT, 30)) drm_err(display->drm, "DSI LP not going Low\n"); /* Disable MIPI PHY transparent latch */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index f078b9cda96c..a2da6285890b 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -319,8 +319,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder) * PLL lock should deassert within 200us. * Wait up to 1ms before timing out. 
*/ - if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE, - BXT_DSI_PLL_LOCKED, 1)) + if (intel_de_wait_for_clear_ms(display, BXT_DSI_PLL_ENABLE, + BXT_DSI_PLL_LOCKED, 1)) drm_err(display->drm, "Timeout waiting for PLL lock deassertion\n"); } @@ -568,8 +568,8 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE); /* Timeout and fail if PLL not locked */ - if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE, - BXT_DSI_PLL_LOCKED, 1)) { + if (intel_de_wait_for_set_ms(display, BXT_DSI_PLL_ENABLE, + BXT_DSI_PLL_LOCKED, 1)) { drm_err(display->drm, "Timed out waiting for DSI PLL to lock\n"); return; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index ed6599694835..3215ef49c975 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -68,6 +68,7 @@ #include <linux/nospec.h> #include <drm/drm_cache.h> +#include <drm/drm_print.h> #include <drm/drm_syncobj.h> #include "gt/gen6_ppgtt.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index c3e6a325872d..189ecdd0a9c1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -4,6 +4,7 @@ */ #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include "display/intel_display.h" #include "gem/i915_gem_ioctls.h" @@ -193,8 +194,8 @@ i915_gem_dumb_create(struct drm_file *file, args->pitch = ALIGN(args->width * cpp, 64); /* align stride to page size so that we can remap */ - if (args->pitch > intel_plane_fb_max_stride(dev, format, - DRM_FORMAT_MOD_LINEAR)) + if (args->pitch > intel_dumb_fb_max_stride(dev, format, + DRM_FORMAT_MOD_LINEAR)) args->pitch = ALIGN(args->pitch, 4096); if (args->pitch < args->width) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 39c7c32e1e74..b057c2fa03a4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -9,6 +9,7 @@ #include <linux/uaccess.h> #include <drm/drm_auth.h> +#include <drm/drm_print.h> #include <drm/drm_syncobj.h> #include "gem/i915_gem_ioctls.h" @@ -142,7 +143,7 @@ enum { * we want to leave the object where it is and for all the existing relocations * to match. If the object is given a new address, or if userspace thinks the * object is elsewhere, we have to parse all the relocation entries and update - * the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that + * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that * all the target addresses in all of its objects match the value in the * relocation entries and that they all match the presumed offsets given by the * list of execbuffer objects. 
Using this knowledge, we know that if we haven't diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 75f5b0e871ef..4542135b20d5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -16,12 +16,13 @@ #include "i915_gem_evict.h" #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" -#include "i915_gem_object.h" #include "i915_gem_mman.h" +#include "i915_gem_object.h" +#include "i915_gem_ttm.h" +#include "i915_jiffies.h" #include "i915_mm.h" #include "i915_trace.h" #include "i915_user_extensions.h" -#include "i915_gem_ttm.h" #include "i915_vma.h" static inline bool diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 478011e5ecb3..3f6f040c359d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -27,6 +27,7 @@ #include <linux/sched/mm.h> #include <drm/drm_cache.h> +#include <drm/drm_print.h> #include "display/intel_frontbuffer.h" #include "pxp/intel_pxp.h" @@ -476,24 +477,24 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj) void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, enum fb_op_origin origin) { - struct intel_frontbuffer *front; + struct i915_frontbuffer *front; - front = i915_gem_object_get_frontbuffer(obj); + front = i915_gem_object_frontbuffer_lookup(obj); if (front) { - intel_frontbuffer_flush(front, origin); - intel_frontbuffer_put(front); + intel_frontbuffer_flush(&front->base, origin); + i915_gem_object_frontbuffer_put(front); } } void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, enum fb_op_origin origin) { - struct intel_frontbuffer *front; + struct i915_frontbuffer *front; - front = i915_gem_object_get_frontbuffer(obj); + front = i915_gem_object_frontbuffer_lookup(obj); if (front) { - intel_frontbuffer_invalidate(front, origin); - intel_frontbuffer_put(front); + intel_frontbuffer_invalidate(&front->base, origin); + i915_gem_object_frontbuffer_put(front); } } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 148034ef504d..8878539c10ed 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -802,6 +802,7 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj) void i915_gem_fence_wait_priority(struct dma_fence *fence, const struct i915_sched_attr *attr); +void i915_gem_fence_wait_priority_display(struct dma_fence *fence); int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c new file mode 100644 index 000000000000..aaa15e7b3f17 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include "i915_drv.h" +#include "i915_gem_object_frontbuffer.h" + +static int frontbuffer_active(struct i915_active *ref) +{ + struct i915_frontbuffer *front = + container_of(ref, typeof(*front), write); + + kref_get(&front->ref); + return 0; +} + +static void frontbuffer_retire(struct i915_active *ref) +{ + struct i915_frontbuffer *front = + container_of(ref, typeof(*front), write); + + intel_frontbuffer_flush(&front->base, ORIGIN_CS); + i915_gem_object_frontbuffer_put(front); +} + +struct i915_frontbuffer * 
+i915_gem_object_frontbuffer_get(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_frontbuffer *front, *cur; + + front = i915_gem_object_frontbuffer_lookup(obj); + if (front) + return front; + + front = kmalloc(sizeof(*front), GFP_KERNEL); + if (!front) + return NULL; + + intel_frontbuffer_init(&front->base, &i915->drm); + + kref_init(&front->ref); + i915_gem_object_get(obj); + front->obj = obj; + + i915_active_init(&front->write, + frontbuffer_active, + frontbuffer_retire, + I915_ACTIVE_RETIRE_SLEEPS); + + spin_lock(&i915->frontbuffer_lock); + if (rcu_access_pointer(obj->frontbuffer)) { + cur = rcu_dereference_protected(obj->frontbuffer, true); + kref_get(&cur->ref); + } else { + cur = front; + rcu_assign_pointer(obj->frontbuffer, front); + } + spin_unlock(&i915->frontbuffer_lock); + + if (cur != front) { + i915_gem_object_put(obj); + intel_frontbuffer_fini(&front->base); + kfree(front); + } + + return cur; +} + +void i915_gem_object_frontbuffer_ref(struct i915_frontbuffer *front) +{ + kref_get(&front->ref); +} + +static void frontbuffer_release(struct kref *ref) + __releases(&i915->frontbuffer_lock) +{ + struct i915_frontbuffer *front = + container_of(ref, typeof(*front), ref); + struct drm_i915_gem_object *obj = front->obj; + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + i915_ggtt_clear_scanout(obj); + + RCU_INIT_POINTER(obj->frontbuffer, NULL); + + spin_unlock(&i915->frontbuffer_lock); + + i915_active_fini(&front->write); + + i915_gem_object_put(obj); + + intel_frontbuffer_fini(&front->base); + + kfree_rcu(front, rcu); +} + +void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front) +{ + struct drm_i915_private *i915 = to_i915(front->obj->base.dev); + + kref_put_lock(&front->ref, frontbuffer_release, + &i915->frontbuffer_lock); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h index b682969e3a29..2133e29047c5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h @@ -12,6 +12,14 @@ #include "display/intel_frontbuffer.h" #include "i915_gem_object_types.h" +struct i915_frontbuffer { + struct intel_frontbuffer base; + struct drm_i915_gem_object *obj; + struct i915_active write; + struct rcu_head rcu; + struct kref ref; +}; + void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, enum fb_op_origin origin); void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, @@ -33,19 +41,23 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, __i915_gem_object_invalidate_frontbuffer(obj, origin); } +struct i915_frontbuffer *i915_gem_object_frontbuffer_get(struct drm_i915_gem_object *obj); +void i915_gem_object_frontbuffer_ref(struct i915_frontbuffer *front); +void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front); + /** - * i915_gem_object_get_frontbuffer - Get the object's frontbuffer - * @obj: The object whose frontbuffer to get. + * i915_gem_object_frontbuffer_lookup - Look up the object's frontbuffer + * @obj: The object whose frontbuffer to look up. * * Get pointer to object's frontbuffer if such exists. Please note that RCU * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. 
* * Return: pointer to object's frontbuffer is such exists or NULL */ -static inline struct intel_frontbuffer * -i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj) +static inline struct i915_frontbuffer * +i915_gem_object_frontbuffer_lookup(const struct drm_i915_gem_object *obj) { - struct intel_frontbuffer *front; + struct i915_frontbuffer *front; if (likely(!rcu_access_pointer(obj->frontbuffer))) return NULL; @@ -62,41 +74,11 @@ i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj) if (likely(front == rcu_access_pointer(obj->frontbuffer))) break; - intel_frontbuffer_put(front); + i915_gem_object_frontbuffer_put(front); } while (1); rcu_read_unlock(); return front; } -/** - * i915_gem_object_set_frontbuffer - Set the object's frontbuffer - * @obj: The object whose frontbuffer to set. - * @front: The frontbuffer to set - * - * Set object's frontbuffer pointer. If frontbuffer is already set for the - * object keep it and return it's pointer to the caller. Please note that RCU - * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This - * function is protected by i915->display->fb_tracking.lock - * - * Return: pointer to frontbuffer which was set. - */ -static inline struct intel_frontbuffer * -i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj, - struct intel_frontbuffer *front) -{ - struct intel_frontbuffer *cur = front; - - if (!front) { - RCU_INIT_POINTER(obj->frontbuffer, NULL); - } else if (rcu_access_pointer(obj->frontbuffer)) { - cur = rcu_dereference_protected(obj->frontbuffer, true); - kref_get(&cur->ref); - } else { - rcu_assign_pointer(obj->frontbuffer, front); - } - - return cur; -} - #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 64600aa8227f..465ce94aee76 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -574,7 +574,7 @@ struct drm_i915_gem_object { */ u16 write_domain; - struct intel_frontbuffer __rcu *frontbuffer; + struct i915_frontbuffer __rcu *frontbuffer; /** Current tiling stride for the object, if it's tiled. 
*/ unsigned int tiling_and_stride; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 3f09cbce05bb..c2f8e5f95696 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -3,9 +3,11 @@ * Copyright © 2014-2016 Intel Corporation */ +#include <linux/vmalloc.h> + #include <drm/drm_cache.h> #include <drm/drm_panic.h> -#include <linux/vmalloc.h> +#include <drm/drm_print.h> #include "display/intel_fb.h" #include "display/intel_display_types.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index f9e7cab140f8..bc799f182850 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -8,6 +8,7 @@ #include <linux/swap.h> #include <drm/drm_cache.h> +#include <drm/drm_print.h> #include "gt/intel_gt.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index b9dae15c1d16..26dda55a07ff 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -441,11 +441,20 @@ shmem_pwrite(struct drm_i915_gem_object *obj, written = file->f_op->write_iter(&kiocb, &iter); BUG_ON(written == -EIOCBQUEUED); - if (written != size) - return -EIO; - + /* + * First, check if write_iter returned a negative error. + * If the write failed, return the real error code immediately. + * This prevents it from being overwritten by the short write check below. + */ if (written < 0) return written; + /* + * Check for a short write (written bytes != requested size). + * Even if some data was written, return -EIO to indicate that the + * write was not fully completed. + */ + if (written != size) + return -EIO; return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 7a3e74a6676e..e0d1f369a163 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -12,6 +12,8 @@ #include <linux/dma-buf.h> #include <linux/vmalloc.h> +#include <drm/drm_print.h> + #include "gt/intel_gt_requests.h" #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 3380151edfc1..f859c99f969b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -7,6 +7,7 @@ #include <linux/mutex.h> #include <drm/drm_mm.h> +#include <drm/drm_print.h> #include <drm/intel/i915_drm.h> #include "gem/i915_gem_lmem.h" @@ -24,6 +25,11 @@ #include "intel_mchbar_regs.h" #include "intel_pci_config.h" +struct intel_stolen_node { + struct drm_i915_private *i915; + struct drm_mm_node node; +}; + /* * The BIOS typically reserves some of the system's memory for the exclusive * use of the integrated graphics. This memory is no longer available for @@ -36,9 +42,9 @@ * for is a boon. 
*/ -int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, - struct drm_mm_node *node, u64 size, - unsigned alignment, u64 start, u64 end) +static int __i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, + struct drm_mm_node *node, u64 size, + unsigned int alignment, u64 start, u64 end) { int ret; @@ -58,24 +64,43 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, return ret; } -int i915_gem_stolen_insert_node(struct drm_i915_private *i915, - struct drm_mm_node *node, u64 size, - unsigned alignment) +int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size, + unsigned int alignment, u64 start, u64 end) +{ + return __i915_gem_stolen_insert_node_in_range(node->i915, &node->node, + size, alignment, + start, end); +} + +static int __i915_gem_stolen_insert_node(struct drm_i915_private *i915, + struct drm_mm_node *node, u64 size, + unsigned int alignment) { - return i915_gem_stolen_insert_node_in_range(i915, node, - size, alignment, - I915_GEM_STOLEN_BIAS, - U64_MAX); + return __i915_gem_stolen_insert_node_in_range(i915, node, + size, alignment, + I915_GEM_STOLEN_BIAS, + U64_MAX); } -void i915_gem_stolen_remove_node(struct drm_i915_private *i915, - struct drm_mm_node *node) +int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size, + unsigned int alignment) +{ + return __i915_gem_stolen_insert_node(node->i915, &node->node, size, alignment); +} + +static void __i915_gem_stolen_remove_node(struct drm_i915_private *i915, + struct drm_mm_node *node) { mutex_lock(&i915->mm.stolen_lock); drm_mm_remove_node(node); mutex_unlock(&i915->mm.stolen_lock); } +void i915_gem_stolen_remove_node(struct intel_stolen_node *node) +{ + __i915_gem_stolen_remove_node(node->i915, &node->node); +} + static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm) { return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start; @@ -683,7 +708,7 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); GEM_BUG_ON(!stolen); - i915_gem_stolen_remove_node(i915, stolen); + __i915_gem_stolen_remove_node(i915, stolen); kfree(stolen); i915_gem_object_release_memory_region(obj); @@ -772,8 +797,8 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); mutex_unlock(&i915->mm.stolen_lock); } else { - ret = i915_gem_stolen_insert_node(i915, stolen, size, - mem->min_page_size); + ret = __i915_gem_stolen_insert_node(i915, stolen, size, + mem->min_page_size); } if (ret) goto err_free; @@ -785,7 +810,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, return 0; err_remove: - i915_gem_stolen_remove_node(i915, stolen); + __i915_gem_stolen_remove_node(i915, stolen); err_free: kfree(stolen); return ret; @@ -1000,38 +1025,64 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) return obj->ops == &i915_gem_object_stolen_ops; } -bool i915_gem_stolen_initialized(const struct drm_i915_private *i915) +bool i915_gem_stolen_initialized(struct drm_device *drm) { + struct drm_i915_private *i915 = to_i915(drm); + return drm_mm_initialized(&i915->mm.stolen); } -u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915) +u64 i915_gem_stolen_area_address(struct drm_device *drm) { + struct drm_i915_private *i915 = to_i915(drm); + return i915->dsm.stolen.start; } -u64 i915_gem_stolen_area_size(const struct drm_i915_private 
*i915) +u64 i915_gem_stolen_area_size(struct drm_device *drm) { + struct drm_i915_private *i915 = to_i915(drm); + return resource_size(&i915->dsm.stolen); } -u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915, - const struct drm_mm_node *node) +u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node) { + struct drm_i915_private *i915 = node->i915; + return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node); } -bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node) +bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node) +{ + return drm_mm_node_allocated(&node->node); +} + +u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node) { - return drm_mm_node_allocated(node); + return node->node.start; } -u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node) +u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node) { - return node->start; + return node->node.size; +} + +struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm) +{ + struct drm_i915_private *i915 = to_i915(drm); + struct intel_stolen_node *node; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return NULL; + + node->i915 = i915; + + return node; } -u64 i915_gem_stolen_node_size(const struct drm_mm_node *node) +void i915_gem_stolen_node_free(const struct intel_stolen_node *node) { - return node->size; + kfree(node); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h index dfe0db8bb1b9..7b0386002ed4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h @@ -8,21 +8,17 @@ #include <linux/types.h> -struct drm_i915_private; -struct drm_mm_node; +struct drm_device; struct drm_i915_gem_object; +struct drm_i915_private; +struct intel_stolen_node; -#define i915_stolen_fb drm_mm_node - -int i915_gem_stolen_insert_node(struct drm_i915_private *i915, - struct drm_mm_node *node, u64 size, +int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size, unsigned alignment); -int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, - struct drm_mm_node *node, u64 size, +int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size, unsigned alignment, u64 start, u64 end); -void i915_gem_stolen_remove_node(struct drm_i915_private *i915, - struct drm_mm_node *node); +void i915_gem_stolen_remove_node(struct intel_stolen_node *node); struct intel_memory_region * i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, u16 instance); @@ -38,15 +34,17 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj); #define I915_GEM_STOLEN_BIAS SZ_128K -bool i915_gem_stolen_initialized(const struct drm_i915_private *i915); -u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915); -u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915); +bool i915_gem_stolen_initialized(struct drm_device *drm); +u64 i915_gem_stolen_area_address(struct drm_device *drm); +u64 i915_gem_stolen_area_size(struct drm_device *drm); + +u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node); -u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915, - const struct drm_mm_node *node); +bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node); +u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node); +u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node); -bool 
i915_gem_stolen_node_allocated(const struct drm_mm_node *node); -u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node); -u64 i915_gem_stolen_node_size(const struct drm_mm_node *node); +struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm); +void i915_gem_stolen_node_free(const struct intel_stolen_node *node); #endif /* __I915_GEM_STOLEN_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 5a296ba3758a..567b97d28d30 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -145,8 +145,9 @@ i915_tiling_ok(struct drm_i915_gem_object *obj, return false; } - if (GRAPHICS_VER(i915) == 2 || - (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))) + if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)) + tile_width = 128; + else if (GRAPHICS_VER(i915) == 2) tile_width = 128; else tile_width = 512; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 1f4814968868..f65fe86c02b5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -5,11 +5,13 @@ #include <linux/shmem_fs.h> +#include <drm/drm_buddy.h> +#include <drm/drm_print.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_tt.h> -#include <drm/drm_buddy.h> #include "i915_drv.h" +#include "i915_jiffies.h" #include "i915_ttm_buddy_manager.h" #include "intel_memory_region.h" #include "intel_region_ttm.h" @@ -1029,7 +1031,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!obj->ttm.created); - ttm_bo_put(i915_gem_to_ttm(obj)); + ttm_bo_fini(i915_gem_to_ttm(obj)); } static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) @@ -1325,7 +1327,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, * If this function fails, it will call the destructor, but * our caller still owns the object. So no freeing in the * destructor until obj->ttm.created is true. - * Similarly, in delayed_destroy, we can't call ttm_bo_put() + * Similarly, in delayed_destroy, we can't call ttm_bo_fini() * until successful initialization. 
*/ ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c index 61596cecce4d..4824f948daed 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c @@ -3,6 +3,7 @@ * Copyright © 2021 Intel Corporation */ +#include <drm/drm_print.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_tt.h> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 307a18eede72..77cc3af3d518 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -38,6 +38,8 @@ #include <linux/swap.h> #include <linux/sched/mm.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index 54829801d3f7..2893df65c359 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -138,6 +138,13 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence, local_bh_enable(); /* kick the tasklets if queues were reprioritised */ } +void i915_gem_fence_wait_priority_display(struct dma_fence *fence) +{ + struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY }; + + i915_gem_fence_wait_priority(fence, &attr); +} + int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c index 8f13ec4ff0d0..1f1290214031 100644 --- a/drivers/gpu/drm/i915/gem/i915_gemfs.c +++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c @@ -7,6 +7,8 @@ #include <linux/mount.h> #include <linux/fs_context.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_gemfs.h" #include "i915_utils.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 539c620364e3..3557e9e6f422 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_selftest.h" #include "display/intel_display_device.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index eb0158e43417..1330c0b431a7 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -962,13 +962,14 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, if (IS_ERR(rpcs)) return PTR_ERR(rpcs); + i915_gem_ww_ctx_init(&ww, false); + batch = i915_vma_instance(rpcs, ce->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); goto err_put; } - i915_gem_ww_ctx_init(&ww, false); retry: err = i915_gem_object_lock(obj, &ww); if (!err) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 78734c404a6d..0d250d57496a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -7,6 +7,8 @@ #include <linux/highmem.h> #include <linux/prime_numbers.h> +#include <drm/drm_print.h> + #include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c 
b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c index 8116fd5987e2..8c01fb6d4e7b 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c @@ -292,15 +292,15 @@ int gen4_emit_bb_start(struct i915_request *rq, void gen2_irq_enable(struct intel_engine_cs *engine) { - engine->i915->irq_mask &= ~engine->irq_enable_mask; - intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); + engine->i915->gen2_imr_mask &= ~engine->irq_enable_mask; + intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask); intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR); } void gen2_irq_disable(struct intel_engine_cs *engine) { - engine->i915->irq_mask |= engine->irq_enable_mask; - intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); + engine->i915->gen2_imr_mask |= engine->irq_enable_mask; + intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask); } void gen5_irq_enable(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index e9f65f27b53f..071c1cc45257 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -3,6 +3,8 @@ * Copyright © 2014 Intel Corporation */ +#include <drm/drm_print.h> + #include "gen8_engine_cs.h" #include "intel_engine_regs.h" #include "intel_gpu_commands.h" diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index cc866773ba6f..bf6117d5fc57 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -8,6 +8,8 @@ #include <trace/events/dma_fence.h> #include <uapi/linux/sched/types.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_trace.h" #include "intel_breadcrumbs.h" diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 8d4bb95f8424..b279878dca29 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -3,7 +3,10 @@ * Copyright © 2019 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_drv.h" +#include "i915_jiffies.h" #include "i915_request.h" #include "intel_context.h" diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index 833987015b8b..be4bbff1a57c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -7,6 +7,8 @@ #include <linux/list_sort.h> #include <linux/llist.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "intel_engine.h" #include "intel_engine_user.h" diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 7f389cb0bde4..3df683b0402a 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -110,6 +110,8 @@ #include <linux/interrupt.h> #include <linux/string_helpers.h> +#include <drm/drm_print.h> + #include "gen8_engine_cs.h" #include "i915_drv.h" #include "i915_list_util.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 46a5aa4ab9c8..08c4e735481b 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -9,6 +9,7 @@ #include <linux/stop_machine.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/intel/i915_drm.h> #include 
<drm/intel/intel-gtt.h> diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 889e61843ff3..5eda98ebc1ae 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -5,6 +5,8 @@ #include <linux/highmem.h> +#include <drm/drm_print.h> + #include "display/intel_display.h" #include "i915_drv.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c index c5f5f0bdfb2c..cc5d345c5e29 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c @@ -5,6 +5,7 @@ #include "intel_ggtt_gmch.h" +#include <drm/drm_print.h> #include <drm/intel/intel-gtt.h> #include <linux/agp_backend.h> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index 88b147fa5cb1..c90b35881a26 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -205,7 +205,7 @@ static u64 div_u64_roundup(u64 nom, u32 den) u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count) { - return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency); + return mul_u64_u32_div(count, NSEC_PER_SEC, gt->clock_frequency); } u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count) @@ -215,7 +215,7 @@ u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count) u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns) { - return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC); + return mul_u64_u32_div(ns, gt->clock_frequency, NSEC_PER_SEC); } u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c index dcd40b30a96b..bd9abbd6d3d4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c @@ -5,6 +5,8 @@ #include <linux/debugfs.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "intel_gt.h" #include "intel_gt_debugfs.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 87ef85483bae..96411f357f5d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -7,6 +7,8 @@ #include <linux/seq_file.h> #include <linux/string_helpers.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_reg.h" #include "intel_gt.h" diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index c481b56fa67d..e8927ad49142 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3,6 +3,8 @@ * Copyright © 2014 Intel Corporation */ +#include <drm/drm_print.h> + #include "gem/i915_gem_lmem.h" #include "gen8_engine_cs.h" diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 5dd8121f4b15..e8d93a657ef6 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -3,6 +3,8 @@ * Copyright © 2015 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "intel_engine.h" diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index bf38cc5fe872..286d49ecc449 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -6,6 +6,9 @@ #include <linux/pm_runtime.h> #include 
<linux/string_helpers.h> +#include <drm/drm_print.h> + +#include "display/vlv_clock.h" #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_reg.h" @@ -802,7 +805,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, enum intel_rc6_res_type id) /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { mul = 1000000; - div = i915->czclk_freq; + div = vlv_clock_get_czclk(&i915->drm); overflow_hw = BIT_ULL(40); time_hw = vlv_residency_raw(uncore, reg); } else { diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index 51bb27e10a4f..a30060fd4429 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_pci.h" #include "i915_reg.h" @@ -18,16 +20,6 @@ #include "gt/intel_gt_regs.h" #ifdef CONFIG_64BIT -static void _release_bars(struct pci_dev *pdev) -{ - int resno; - - for (resno = PCI_STD_RESOURCES; resno < PCI_STD_RESOURCE_END; resno++) { - if (pci_resource_len(pdev, resno)) - pci_release_resource(pdev, resno); - } -} - static void _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size) { @@ -35,9 +27,7 @@ _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size) int bar_size = pci_rebar_bytes_to_size(size); int ret; - _release_bars(pdev); - - ret = pci_resize_resource(pdev, resno, bar_size); + ret = pci_resize_resource(pdev, resno, bar_size, 0); if (ret) { drm_info(&i915->drm, "Failed to resize BAR%d to %dM (%pe)\n", resno, 1 << bar_size, ERR_PTR(ret)); @@ -61,16 +51,12 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR)); if (i915->params.lmem_bar_size) { - u32 bar_sizes; - - rebar_size = i915->params.lmem_bar_size * - (resource_size_t)SZ_1M; - bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR); - + rebar_size = i915->params.lmem_bar_size * (resource_size_t)SZ_1M; if (rebar_size == current_size) return; - if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) || + if (!pci_rebar_size_supported(pdev, GEN12_LMEM_BAR, + pci_rebar_bytes_to_size(rebar_size)) || rebar_size >= roundup_pow_of_two(lmem_size)) { rebar_size = lmem_size; diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 4b56ec3743cf..d53766c288f7 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -3,6 +3,8 @@ * Copyright © 2014 Intel Corporation */ +#include <drm/drm_print.h> + #include "gem/i915_gem_internal.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 4da94098bd3e..b01c837ab646 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -7,8 +7,8 @@ #include <drm/intel/i915_drm.h> -#include "display/intel_display.h" #include "display/intel_display_rps.h" +#include "display/vlv_clock.h" #include "soc/intel_dram.h" #include "i915_drv.h" @@ -1690,10 +1690,7 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - rps->gpll_ref_freq = - vlv_get_cck_clock(&i915->drm, "GPLL ref", - CCK_GPLL_CLOCK_CONTROL, - i915->czclk_freq); + rps->gpll_ref_freq = vlv_clock_get_gpll(&i915->drm); drm_dbg(&i915->drm, "GPLL 
reference freq: %d kHz\n", rps->gpll_ref_freq); @@ -1703,13 +1700,13 @@ static void vlv_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + vlv_init_gpll_ref_freq(rps); + vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_NC) | BIT(VLV_IOSF_SB_CCK)); - vlv_init_gpll_ref_freq(rps); - rps->max_freq = vlv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", @@ -1737,13 +1734,13 @@ static void chv_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + vlv_init_gpll_ref_freq(rps); + vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_NC) | BIT(VLV_IOSF_SB_CCK)); - vlv_init_gpll_ref_freq(rps); - rps->max_freq = chv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", @@ -1780,6 +1777,7 @@ static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir) { + struct drm_i915_private *i915 = rps_to_i915(rps); struct intel_uncore *uncore = rps_to_uncore(rps); const struct intel_rps_ei *prev = &rps->ei; struct intel_rps_ei now; @@ -1796,7 +1794,7 @@ static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir) time = ktime_us_delta(now.ktime, prev->ktime); - time *= rps_to_i915(rps)->czclk_freq; + time *= vlv_clock_get_czclk(&i915->drm); /* Workload can be split between render + media, * e.g. SwapBuffers being blitted in X after being rendered in diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c index 2945526d52d1..fb260d1ec360 100644 --- a/drivers/gpu/drm/i915/gt/intel_sa_media.c +++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c @@ -4,6 +4,7 @@ */ #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "i915_drv.h" #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 9501d323d0d3..656a499b2706 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -5,6 +5,8 @@ #include <linux/string_helpers.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_perf_types.h" #include "intel_engine_regs.h" diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c index c2ee5e1826b5..1dc8205bc64d 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c @@ -7,6 +7,8 @@ #include <linux/bitmap.h> #include <linux/string_helpers.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "intel_gt_debugfs.h" #include "intel_gt_regs.h" diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index b9640212d659..843f72829a24 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -4,6 +4,7 @@ */ #include <drm/drm_cache.h> +#include <drm/drm_print.h> #include "gem/i915_gem_internal.h" diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.h b/drivers/gpu/drm/i915/gt/intel_tlb.h index 337327af92ac..ec7612216248 100644 --- a/drivers/gpu/drm/i915/gt/intel_tlb.h +++ b/drivers/gpu/drm/i915/gt/intel_tlb.h @@ -18,7 +18,7 @@ void intel_gt_fini_tlb(struct intel_gt *gt); static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt) { - return seqprop_sequence(&gt->tlb.seqno); + return raw_read_seqcount(&gt->tlb.seqno); } static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt) diff --git
a/drivers/gpu/drm/i915/gt/intel_wopcm.c b/drivers/gpu/drm/i915/gt/intel_wopcm.c index 7ebbcc191c2d..1b26ff6488b3 100644 --- a/drivers/gpu/drm/i915/gt/intel_wopcm.c +++ b/drivers/gpu/drm/i915/gt/intel_wopcm.c @@ -3,6 +3,8 @@ * Copyright © 2017-2019 Intel Corporation */ +#include <drm/drm_print.h> + #include "intel_wopcm.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 7d486dfa2fc1..ece88c612e27 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -5,6 +5,7 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "i915_mmio_range.h" #include "intel_context.h" #include "intel_engine_pm.h" #include "intel_engine_regs.h" @@ -2923,7 +2924,7 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine) wa_list_apply(&engine->wa_list); } -static const struct i915_range mcr_ranges_gen8[] = { +static const struct i915_mmio_range mcr_ranges_gen8[] = { { .start = 0x5500, .end = 0x55ff }, { .start = 0x7000, .end = 0x7fff }, { .start = 0x9400, .end = 0x97ff }, @@ -2932,7 +2933,7 @@ static const struct i915_range mcr_ranges_gen8[] = { {}, }; -static const struct i915_range mcr_ranges_gen12[] = { +static const struct i915_mmio_range mcr_ranges_gen12[] = { { .start = 0x8150, .end = 0x815f }, { .start = 0x9520, .end = 0x955f }, { .start = 0xb100, .end = 0xb3ff }, @@ -2941,7 +2942,7 @@ static const struct i915_range mcr_ranges_gen12[] = { {}, }; -static const struct i915_range mcr_ranges_xehp[] = { +static const struct i915_mmio_range mcr_ranges_xehp[] = { { .start = 0x4000, .end = 0x4aff }, { .start = 0x5200, .end = 0x52ff }, { .start = 0x5400, .end = 0x7fff }, @@ -2960,7 +2961,7 @@ static const struct i915_range mcr_ranges_xehp[] = { static bool mcr_range(struct drm_i915_private *i915, u32 offset) { - const struct i915_range *mcr_ranges; + const struct i915_mmio_range *mcr_ranges; int i; if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index 5eb46700dc4e..ab76703f6e8b 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_selftest.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index 0454eb1814bb..a06b397b6d42 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -5,12 +5,15 @@ #include <linux/prime_numbers.h> +#include <drm/drm_print.h> + #include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_reset.h" #include "gt/selftest_engine_heartbeat.h" +#include "i915_jiffies.h" #include "i915_selftest.h" #include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c index 54bc447efce0..fdf0e9858607 100644 --- a/drivers/gpu/drm/i915/gt/selftest_migrate.c +++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c @@ -710,7 +710,14 @@ static int threaded_migrate(struct intel_migrate *migrate, thread[i].tsk = tsk; } - msleep(10 * n_cpus); /* start all threads before we kthread_stop() */ + /* + * Start all threads before we kthread_stop(). 
+ * In CHV / BXT+VTD environments, where VMA pinning is committed + * asynchronously, empirically determined 100ms delay is needed + * to avoid stopping threads that may still wait for completion of + * intel_ggtt_bind_vma and fail with -ERESTARTSYS when interrupted. + */ + msleep((intel_vm_no_concurrent_access_wa(migrate->context->vm->i915) ? 100 : 10) * n_cpus); for (i = 0; i < n_cpus; ++i) { struct task_struct *tsk = thread[i].tsk; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c index 9bd29be7656f..dabb870dcdb1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <drm/drm_print.h> + #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gpu_commands.h" diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 62d14f82256f..8cc6e712b0f7 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -34,6 +34,8 @@ * */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_reg.h" #include "gt/intel_ggtt_fencing.h" diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 9bafac1eaf48..295a7b5e1d7c 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -31,6 +31,8 @@ * */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "gvt.h" #include "intel_pci_config.h" diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d432fdd69833..df04e4ead8ea 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -36,6 +36,8 @@ #include <linux/slab.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_reg.h" #include "display/intel_display_regs.h" diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 74197e337585..06517d1f07a2 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -33,6 +33,7 @@ */ #include <drm/display/drm_dp.h> +#include <drm/drm_print.h> #include "i915_drv.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 4f599af766b0..92506c80322d 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -33,6 +33,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> +#include <drm/drm_print.h> #include "gem/i915_gem_dmabuf.h" diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 2031b97de2b7..30e414381af3 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -33,6 +33,7 @@ */ #include <drm/display/drm_dp.h> +#include <drm/drm_print.h> #include "display/intel_dp_aux_regs.h" #include "display/intel_gmbus.h" diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index ae9b0ded3651..076d9139edc6 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -33,6 +33,8 @@ * */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index f446f73f0fe2..36ea12ade849 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -37,6 +37,7 @@ */ #include 
<drm/display/drm_dp.h> +#include <drm/drm_print.h> #include "i915_drv.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index a956da68e6bd..3e66269bc4ee 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -31,6 +31,8 @@ #include <linux/eventfd.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_reg.h" #include "display/intel_display_regs.h" diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 96d23717684f..3abc9206f1a8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -48,6 +48,7 @@ #include <linux/nospec.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include "i915_drv.h" #include "intel_gvt.h" @@ -1240,19 +1241,15 @@ static int intel_vgpu_ioctl_get_region_info(struct vfio_device *vfio_dev, } if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) { - switch (cap_type_id) { - case VFIO_REGION_INFO_CAP_SPARSE_MMAP: + ret = -EINVAL; + if (cap_type_id == VFIO_REGION_INFO_CAP_SPARSE_MMAP) { ret = vfio_info_add_capability( caps, &sparse->header, struct_size(sparse, areas, sparse->nr_areas)); - if (ret) { - kfree(sparse); - return ret; - } - break; - default: + } + if (ret) { kfree(sparse); - return -EINVAL; + return ret; } } @@ -1330,21 +1327,27 @@ static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd, if (copy_from_user(&hdr, (void __user *)arg, minsz)) return -EFAULT; + if (!is_power_of_2(hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) || + !is_power_of_2(hdr.flags & VFIO_IRQ_SET_ACTION_TYPE_MASK)) + return -EINVAL; + if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { int max = intel_vgpu_get_irq_count(vgpu, hdr.index); + if (!hdr.count) + return -EINVAL; + ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS, &data_size); if (ret) { - gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); - return -EINVAL; - } - if (data_size) { - data = memdup_user((void __user *)(arg + minsz), - data_size); - if (IS_ERR(data)) - return PTR_ERR(data); + gvt_vgpu_err("vfio_set_irqs_validate_and_prepare failed\n"); + return ret; } + + data = memdup_user((void __user *)(arg + minsz), + data_size); + if (IS_ERR(data)) + return PTR_ERR(data); } ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index da1135fa7cda..214eb7effa31 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -34,6 +34,9 @@ */ #include <linux/vmalloc.h> + +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_reg.h" #include "display/intel_display_regs.h" @@ -49,7 +52,7 @@ * @gpa: guest physical address * * Returns: - * Zero on success, negative error code if failed + * The MMIO offset of the given GPA */ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) { @@ -58,7 +61,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) } #define reg_is_mmio(gvt, reg) \ - (reg >= 0 && reg < gvt->device_info.mmio_size) + (reg < gvt->device_info.mmio_size) #define reg_is_gtt(gvt, reg) \ (reg >= gvt->device_info.gtt_start_offset \ diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 0b810baad20a..d4e9d485d382 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -33,6 +33,8 @@ * */ +#include <drm/drm_print.h> + #include "gt/intel_context.h" #include 
"gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 6e87c10bc454..63ad1fed525a 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -35,6 +35,8 @@ #include <linux/kthread.h> +#include <drm/drm_print.h> + #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" #include "gt/intel_execlists_submission.h" diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 11260392234a..c49e4bf95a30 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -31,6 +31,8 @@ * */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 2905df83e180..7654f1be8d3b 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -28,6 +28,7 @@ #include <linux/highmem.h> #include <drm/drm_cache.h> +#include <drm/drm_print.h> #include "gt/intel_engine.h" #include "gt/intel_engine_regs.h" diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c index 24e5bb8a670e..3cb615ffa96d 100644 --- a/drivers/gpu/drm/i915/i915_config.c +++ b/drivers/gpu/drm/i915/i915_config.c @@ -6,7 +6,7 @@ #include <linux/kernel.h> #include "i915_config.h" -#include "i915_utils.h" +#include "i915_jiffies.h" unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index c2e38d4bcd01..42f6b44f0027 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,6 +32,7 @@ #include <linux/string_helpers.h> #include <drm/drm_debugfs.h> +#include <drm/drm_print.h> #include "gem/i915_gem_context.h" #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index a28c3710c4d5..c97b76771917 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -46,6 +46,8 @@ #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> +#include <drm/intel/display_member.h> +#include <drm/intel/display_parent_interface.h> #include "display/i9xx_display_sr.h" #include "display/intel_bw.h" @@ -737,6 +739,18 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) "DRM_I915_DEBUG_RUNTIME_PM enabled\n"); } +static const struct intel_display_parent_interface parent = { + .rpm = &i915_display_rpm_interface, +}; + +const struct intel_display_parent_interface *i915_driver_parent_interface(void) +{ + return &parent; +} + +/* Ensure drm and display members are placed properly. */ +INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct drm_i915_private, drm, display); + static struct drm_i915_private * i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -758,7 +772,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) /* Set up device info and initial runtime info. 
*/ intel_device_info_driver_create(i915, pdev->device, match_info); - display = intel_display_device_probe(pdev); + display = intel_display_device_probe(pdev, &parent); if (IS_ERR(display)) return ERR_CAST(display); @@ -978,7 +992,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_runtime_pm_disable(&i915->runtime_pm); intel_power_domains_disable(display); - drm_client_dev_suspend(&i915->drm, false); + drm_client_dev_suspend(&i915->drm); if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(&i915->drm); intel_display_driver_disable_user_access(display); @@ -1053,7 +1067,6 @@ static int i915_drm_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_display *display = dev_priv->display; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); pci_power_t opregion_target_state; disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); @@ -1061,14 +1074,12 @@ static int i915_drm_suspend(struct drm_device *dev) /* We do a lot of poking in a lot of registers, make sure they work * properly. */ intel_power_domains_disable(display); - drm_client_dev_suspend(dev, false); + drm_client_dev_suspend(dev); if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(dev); intel_display_driver_disable_user_access(display); } - pci_save_state(pdev); - intel_display_driver_suspend(display); intel_irq_suspend(dev_priv); @@ -1103,7 +1114,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_display *display = dev_priv->display; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; struct intel_gt *gt; int ret, i; @@ -1124,11 +1134,21 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) if (ret) { drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret); intel_display_power_resume_early(display); - - goto out; } - pci_disable_device(pdev); + enable_rpm_wakeref_asserts(rpm); + + if (!dev_priv->uncore.user_forcewake_count) + intel_runtime_pm_driver_release(rpm); + + return ret; +} + +static int i915_drm_suspend_noirq(struct drm_device *dev, bool hibernation) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + /* * During hibernation on some platforms the BIOS may try to access * the device even though it's already in D3 and hang the machine. So @@ -1140,21 +1160,20 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) * Lenovo Thinkpad X301, X61s, X60, T60, X41 * Fujitsu FSC S7110 * Acer Aspire 1830T + * + * pci_save_state() prevents drivers/pci from + * automagically putting the device into D3. 
*/ - if (!(hibernation && GRAPHICS_VER(dev_priv) < 6)) - pci_set_power_state(pdev, PCI_D3hot); - -out: - enable_rpm_wakeref_asserts(rpm); - if (!dev_priv->uncore.user_forcewake_count) - intel_runtime_pm_driver_release(rpm); + if (hibernation && GRAPHICS_VER(dev_priv) < 6) + pci_save_state(pdev); - return ret; + return 0; } int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) { + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int error; if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND && @@ -1168,7 +1187,14 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, if (error) return error; - return i915_drm_suspend_late(&i915->drm, false); + error = i915_drm_suspend_late(&i915->drm, false); + if (error) + return error; + + pci_save_state(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; } static int i915_drm_resume(struct drm_device *dev) @@ -1245,7 +1271,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_opregion_resume(display); - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); intel_power_domains_enable(display); @@ -1260,7 +1286,6 @@ static int i915_drm_resume_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_display *display = dev_priv->display; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct intel_gt *gt; int ret, i; @@ -1274,41 +1299,6 @@ static int i915_drm_resume_early(struct drm_device *dev) * similar so that power domains can be employed. */ - /* - * Note that we need to set the power state explicitly, since we - * powered off the device during freeze and the PCI core won't power - * it back up for us during thaw. Powering off the device during - * freeze is not a hard requirement though, and during the - * suspend/resume phases the PCI core makes sure we get here with the - * device powered on. So in case we change our freeze logic and keep - * the device powered we can also remove the following set power state - * call. - */ - ret = pci_set_power_state(pdev, PCI_D0); - if (ret) { - drm_err(&dev_priv->drm, - "failed to set PCI D0 power state (%d)\n", ret); - return ret; - } - - /* - * Note that pci_enable_device() first enables any parent bridge - * device and only then sets the power state for this device. The - * bridge enabling is a nop though, since bridge devices are resumed - * first. The order of enabling power and enabling the device is - * imposed by the PCI core as described above, so here we preserve the - * same order for the freeze/thaw phases. - * - * TODO: eventually we should remove pci_disable_device() / - * pci_enable_enable_device() from suspend/resume. Due to how they - * depend on the device enable refcount we can't anyway depend on them - * disabling/enabling the device. 
- */ - if (pci_enable_device(pdev)) - return -EIO; - - pci_set_master(pdev); - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); ret = vlv_resume_prepare(dev_priv, false); @@ -1328,11 +1318,18 @@ static int i915_drm_resume_early(struct drm_device *dev) int i915_driver_resume_switcheroo(struct drm_i915_private *i915) { + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int ret; if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; + ret = pci_set_power_state(pdev, PCI_D0); + if (ret) + return ret; + + pci_restore_state(pdev); + ret = i915_drm_resume_early(&i915->drm); if (ret) return ret; @@ -1389,6 +1386,16 @@ static int i915_pm_suspend_late(struct device *kdev) return i915_drm_suspend_late(&i915->drm, false); } +static int i915_pm_suspend_noirq(struct device *kdev) +{ + struct drm_i915_private *i915 = kdev_to_i915(kdev); + + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + return i915_drm_suspend_noirq(&i915->drm, false); +} + static int i915_pm_poweroff_late(struct device *kdev) { struct drm_i915_private *i915 = kdev_to_i915(kdev); @@ -1399,6 +1406,16 @@ static int i915_pm_poweroff_late(struct device *kdev) return i915_drm_suspend_late(&i915->drm, true); } +static int i915_pm_poweroff_noirq(struct device *kdev) +{ + struct drm_i915_private *i915 = kdev_to_i915(kdev); + + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + return i915_drm_suspend_noirq(&i915->drm, true); +} + static int i915_pm_resume_early(struct device *kdev) { struct drm_i915_private *i915 = kdev_to_i915(kdev); @@ -1664,24 +1681,25 @@ const struct dev_pm_ops i915_pm_ops = { .prepare = i915_pm_prepare, .suspend = i915_pm_suspend, .suspend_late = i915_pm_suspend_late, + .suspend_noirq = i915_pm_suspend_noirq, .resume_early = i915_pm_resume_early, .resume = i915_pm_resume, .complete = i915_pm_complete, /* * S4 event handlers - * @freeze, @freeze_late : called (1) before creating the - * hibernation image [PMSG_FREEZE] and - * (2) after rebooting, before restoring - * the image [PMSG_QUIESCE] - * @thaw, @thaw_early : called (1) after creating the hibernation - * image, before writing it [PMSG_THAW] - * and (2) after failing to create or - * restore the image [PMSG_RECOVER] - * @poweroff, @poweroff_late: called after writing the hibernation - * image, before rebooting [PMSG_HIBERNATE] - * @restore, @restore_early : called after rebooting and restoring the - * hibernation image [PMSG_RESTORE] + * @freeze* : called (1) before creating the + * hibernation image [PMSG_FREEZE] and + * (2) after rebooting, before restoring + * the image [PMSG_QUIESCE] + * @thaw* : called (1) after creating the hibernation + * image, before writing it [PMSG_THAW] + * and (2) after failing to create or + * restore the image [PMSG_RECOVER] + * @poweroff* : called after writing the hibernation + * image, before rebooting [PMSG_HIBERNATE] + * @restore* : called after rebooting and restoring the + * hibernation image [PMSG_RESTORE] */ .freeze = i915_pm_freeze, .freeze_late = i915_pm_freeze_late, @@ -1689,6 +1707,7 @@ const struct dev_pm_ops i915_pm_ops = { .thaw = i915_pm_thaw, .poweroff = i915_pm_suspend, .poweroff_late = i915_pm_poweroff_late, + .poweroff_noirq = i915_pm_poweroff_noirq, .restore_early = i915_pm_restore_early, .restore = i915_pm_restore, diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h index 1e95ecb2a163..9551519ab429 100644 --- a/drivers/gpu/drm/i915/i915_driver.h +++ b/drivers/gpu/drm/i915/i915_driver.h @@ -12,6 +12,7 @@ struct 
pci_dev; struct pci_device_id; struct drm_i915_private; struct drm_printer; +struct intel_display_parent_interface; #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" @@ -24,6 +25,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915); int i915_driver_resume_switcheroo(struct drm_i915_private *i915); int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); +const struct intel_display_parent_interface *i915_driver_parent_interface(void); void i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6a768aad8edd..5381a934a671 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -174,6 +174,7 @@ struct i915_selftest_stash { struct drm_i915_private { struct drm_device drm; + /* display device data, must be placed after drm device member */ struct intel_display *display; /* FIXME: Device release actions should all be moved to drmm_ */ @@ -234,14 +235,11 @@ struct drm_i915_private { /* Sideband mailbox protection */ struct mutex sb_lock; - /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 irq_mask; + /* Cached value of gen 2-4 IMR to avoid reads in updating the bitfield */ + u32 gen2_imr_mask; bool preserve_bios_swizzle; - unsigned int hpll_freq; - unsigned int czclk_freq; - /** * wq - Driver workqueue for GEM. * @@ -313,6 +311,8 @@ struct drm_i915_private { struct file *mmap_singleton; } gem; + spinlock_t frontbuffer_lock; /* protects obj->frontbuffer (write-side) */ + struct intel_pxp *pxp; struct i915_pmu pmu; @@ -490,16 +490,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P) #define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2) #define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE) -/* - * Display code shared by i915 and Xe relies on macros like IS_LUNARLAKE, - * so we need to define these even on platforms that the i915 base driver - * doesn't support. Ensure the parameter is used in the definition to - * avoid 'unused variable' warnings when compiling the shared display code - * for i915. - */ -#define IS_LUNARLAKE(i915) (0 && i915) -#define IS_BATTLEMAGE(i915) (0 && i915) -#define IS_PANTHERLAKE(i915) (0 && i915) #define IS_ARROWLAKE_H(i915) \ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H) @@ -604,8 +594,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
*/ -#define HAS_128_BYTE_Y_TILING(i915) (GRAPHICS_VER(i915) != 2 && \ - !(IS_I915G(i915) || IS_I915GM(i915))) +#define HAS_128_BYTE_Y_TILING(i915) (!IS_I915G(i915) && !IS_I915GM(i915)) #define HAS_RC6(i915) (INTEL_INFO(i915)->has_rc6) #define HAS_RC6p(i915) (INTEL_INFO(i915)->has_rc6p) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e14a0c3db999..4c82c9544b93 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -37,6 +37,7 @@ #include <linux/mman.h> #include <drm/drm_cache.h> +#include <drm/drm_print.h> #include <drm/drm_vma_manager.h> #include "gem/i915_gem_clflush.h" @@ -1298,6 +1299,8 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv) { i915_gem_init__mm(dev_priv); i915_gem_init__contexts(dev_priv); + + spin_lock_init(&dev_priv->frontbuffer_lock); } void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 6fcda6d7b5b7..cf47c2491a0a 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -2,6 +2,8 @@ * SPDX-License-Identifier: MIT */ +#include <drm/drm_print.h> + #include "display/intel_overlay.h" #include "gem/i915_gem_mman.h" #include "gt/intel_engine_user.h" diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8d5da222a187..1898be4ddc8b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -32,13 +32,12 @@ #include <linux/sysrq.h> #include <drm/drm_drv.h> +#include <drm/drm_print.h> -#include "display/intel_display_core.h" #include "display/intel_display_irq.h" #include "display/intel_hotplug.h" #include "display/intel_hotplug_irq.h" #include "display/intel_lpe_audio.h" -#include "display/intel_psr_regs.h" #include "gt/intel_breadcrumbs.h" #include "gt/intel_gt.h" @@ -415,7 +414,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) struct drm_i915_private *i915 = arg; struct intel_display *display = i915->display; void __iomem * const regs = intel_uncore_regs(&i915->uncore); - u32 de_iir, gt_iir, de_ier, sde_ier = 0; + u32 gt_iir, de_ier = 0, sde_ier = 0; irqreturn_t ret = IRQ_NONE; if (unlikely(!intel_irqs_enabled(i915))) @@ -424,19 +423,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ disable_rpm_wakeref_asserts(&i915->runtime_pm); - /* disable master interrupt before clearing iir */ - de_ier = raw_reg_read(regs, DEIER); - raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - - /* Disable south interrupts. We'll only write to SDEIIR once, so further - * interrupts will will be stored on its back queue, and then we'll be - * able to process them after we restore SDEIER (as soon as we restore - * it, we'll get an interrupt if SDEIIR still has something to process - * due to its back queue). 
*/ - if (!HAS_PCH_NOP(display)) { - sde_ier = raw_reg_read(regs, SDEIER); - raw_reg_write(regs, SDEIER, 0); - } + /* Disable master and south interrupts */ + ilk_display_irq_master_disable(display, &de_ier, &sde_ier); /* Find, clear, then process each source of interrupt */ @@ -450,15 +438,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) ret = IRQ_HANDLED; } - de_iir = raw_reg_read(regs, DEIIR); - if (de_iir) { - raw_reg_write(regs, DEIIR, de_iir); - if (DISPLAY_VER(display) >= 7) - ivb_display_irq_handler(display, de_iir); - else - ilk_display_irq_handler(display, de_iir); + if (ilk_display_irq_handler(display)) ret = IRQ_HANDLED; - } if (GRAPHICS_VER(i915) >= 6) { u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR); @@ -469,9 +450,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) } } - raw_reg_write(regs, DEIER, de_ier); - if (sde_ier) - raw_reg_write(regs, SDEIER, sde_ier); + /* Re-enable master and south interrupts */ + ilk_display_irq_master_enable(display, de_ier, sde_ier); pmu_irq_stats(i915, ret); @@ -656,22 +636,10 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) static void ilk_irq_reset(struct drm_i915_private *dev_priv) { struct intel_display *display = dev_priv->display; - struct intel_uncore *uncore = &dev_priv->uncore; - - gen2_irq_reset(uncore, DE_IRQ_REGS); - dev_priv->irq_mask = ~0u; - - if (GRAPHICS_VER(dev_priv) == 7) - intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); - - if (IS_HASWELL(dev_priv)) { - intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); - intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); - } + /* The master interrupt enable is in DEIER, reset display irq first */ + ilk_display_irq_reset(display); gen5_gt_irq_reset(to_gt(dev_priv)); - - ibx_display_irq_reset(display); } static void valleyview_irq_reset(struct drm_i915_private *dev_priv) @@ -826,9 +794,10 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); } +#define I9XX_HAS_FBC(i915) (IS_I85X(i915) || IS_I865G(i915) || IS_I915GM(i915) || IS_I945GM(i915)) + static u32 i9xx_error_mask(struct drm_i915_private *i915) { - struct intel_display *display = i915->display; /* * On gen2/3 FBC generates (seemingly spurious) * display INVALID_GTT/INVALID_GTT_PTE table errors. @@ -841,7 +810,7 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915) * Unfortunately we can't mask off individual PGTBL_ER bits, * so we just have to mask off all page table errors via EMR. 
*/ - if (HAS_FBC(display)) + if (I9XX_HAS_FBC(i915)) return I915_ERROR_MEMORY_REFRESH; else return I915_ERROR_PAGE_TABLE | @@ -897,7 +866,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv) gen2_error_reset(uncore, GEN2_ERROR_REGS); gen2_irq_reset(uncore, GEN2_IRQ_REGS); - dev_priv->irq_mask = ~0u; + dev_priv->gen2_imr_mask = ~0u; } static void i915_irq_postinstall(struct drm_i915_private *dev_priv) @@ -908,28 +877,14 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv)); - dev_priv->irq_mask = - ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_MASTER_ERROR_INTERRUPT); + enable_mask = i9xx_display_irq_enable_mask(display) | + I915_MASTER_ERROR_INTERRUPT; - enable_mask = - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_MASTER_ERROR_INTERRUPT | - I915_USER_INTERRUPT; - - if (DISPLAY_VER(display) >= 3) { - dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT; - enable_mask |= I915_ASLE_INTERRUPT; - } + dev_priv->gen2_imr_mask = ~enable_mask; - if (HAS_HOTPLUG(display)) { - dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; - enable_mask |= I915_DISPLAY_PORT_INTERRUPT; - } + enable_mask |= I915_USER_INTERRUPT; - gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask); i915_display_irq_postinstall(display); } @@ -958,8 +913,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) ret = IRQ_HANDLED; - if (HAS_HOTPLUG(display) && - iir & I915_DISPLAY_PORT_INTERRUPT) + if (iir & I915_DISPLAY_PORT_INTERRUPT) hotplug_status = i9xx_hpd_irq_ack(display); /* Call regardless, as some status bits might not be @@ -999,7 +953,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv) gen2_error_reset(uncore, GEN2_ERROR_REGS); gen2_irq_reset(uncore, GEN2_IRQ_REGS); - dev_priv->irq_mask = ~0u; + dev_priv->gen2_imr_mask = ~0u; } static u32 i965_error_mask(struct drm_i915_private *i915) @@ -1029,25 +983,17 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv)); - dev_priv->irq_mask = - ~(I915_ASLE_INTERRUPT | - I915_DISPLAY_PORT_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_MASTER_ERROR_INTERRUPT); - - enable_mask = - I915_ASLE_INTERRUPT | - I915_DISPLAY_PORT_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_MASTER_ERROR_INTERRUPT | - I915_USER_INTERRUPT; + enable_mask = i9xx_display_irq_enable_mask(display) | + I915_MASTER_ERROR_INTERRUPT; + + dev_priv->gen2_imr_mask = ~enable_mask; + + enable_mask |= I915_USER_INTERRUPT; if (IS_G4X(dev_priv)) enable_mask |= I915_BSD_USER_INTERRUPT; - gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask); i965_display_irq_postinstall(display); } diff --git a/drivers/gpu/drm/i915/i915_jiffies.h b/drivers/gpu/drm/i915/i915_jiffies.h new file mode 100644 index 000000000000..18a4eaea897a --- /dev/null +++ b/drivers/gpu/drm/i915/i915_jiffies.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __I915_JIFFIES_H__ +#define __I915_JIFFIES_H__ + +#include <linux/jiffies.h> + +static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) +{ + unsigned long j = 
msecs_to_jiffies(m); + + return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); +} + +#endif /* __I915_JIFFIES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_mmio_range.c b/drivers/gpu/drm/i915/i915_mmio_range.c new file mode 100644 index 000000000000..724041e81aa7 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mmio_range.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include "i915_mmio_range.h" + +bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table) +{ + while (table->start || table->end) { + if (addr >= table->start && addr <= table->end) + return true; + + table++; + } + + return false; +} diff --git a/drivers/gpu/drm/i915/i915_mmio_range.h b/drivers/gpu/drm/i915/i915_mmio_range.h new file mode 100644 index 000000000000..f1c7086d3e3c --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mmio_range.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef __I915_MMIO_RANGE_H__ +#define __I915_MMIO_RANGE_H__ + +#include <linux/types.h> + +/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */ +struct i915_mmio_range { + u32 start; + u32 end; +}; + +bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table); + +#endif /* __I915_MMIO_RANGE_H__ */ diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c index 5862754c662c..5d9c35b5a182 100644 --- a/drivers/gpu/drm/i915/i915_module.c +++ b/drivers/gpu/drm/i915/i915_module.c @@ -5,6 +5,7 @@ */ #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include "gem/i915_gem_context.h" #include "gem/i915_gem_object.h" diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 1658f1246c6f..0b9d9f3f7813 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -219,6 +219,7 @@ #include "i915_perf.h" #include "i915_perf_oa_regs.h" #include "i915_reg.h" +#include "i915_mmio_range.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -4320,29 +4321,17 @@ static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr) return false; } -static bool reg_in_range_table(u32 addr, const struct i915_range *table) -{ - while (table->start || table->end) { - if (addr >= table->start && addr <= table->end) - return true; - - table++; - } - - return false; -} - #define REG_EQUAL(addr, mmio) \ ((addr) == i915_mmio_reg_offset(mmio)) -static const struct i915_range gen7_oa_b_counters[] = { +static const struct i915_mmio_range gen7_oa_b_counters[] = { { .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */ { .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */ { .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */ {} }; -static const struct i915_range gen12_oa_b_counters[] = { +static const struct i915_mmio_range gen12_oa_b_counters[] = { { .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */ { .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */ { .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */ @@ -4353,7 +4342,7 @@ static const struct i915_range gen12_oa_b_counters[] = { {} }; -static const struct i915_range mtl_oam_b_counters[] = { +static const struct i915_mmio_range mtl_oam_b_counters[] = { { .start = 0x393000, .end = 0x39301c }, /* GEN12_OAM_STARTTRIG1[1-8] */ { .start = 0x393020, .end = 0x39303c }, /* GEN12_OAM_REPORTTRIG1[1-8] */ { .start = 0x393040, 
.end = 0x39307c }, /* GEN12_OAM_CEC[0-7][0-1] */ @@ -4361,43 +4350,43 @@ static const struct i915_range mtl_oam_b_counters[] = { {} }; -static const struct i915_range xehp_oa_b_counters[] = { +static const struct i915_mmio_range xehp_oa_b_counters[] = { { .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */ { .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */ {} }; -static const struct i915_range gen7_oa_mux_regs[] = { +static const struct i915_mmio_range gen7_oa_mux_regs[] = { { .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */ { .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */ { .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */ {} }; -static const struct i915_range hsw_oa_mux_regs[] = { +static const struct i915_mmio_range hsw_oa_mux_regs[] = { { .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */ { .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */ { .start = 0x25100, .end = 0x2ff90 }, {} }; -static const struct i915_range chv_oa_mux_regs[] = { +static const struct i915_mmio_range chv_oa_mux_regs[] = { { .start = 0x182300, .end = 0x1823a4 }, {} }; -static const struct i915_range gen8_oa_mux_regs[] = { +static const struct i915_mmio_range gen8_oa_mux_regs[] = { { .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */ { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */ {} }; -static const struct i915_range gen11_oa_mux_regs[] = { +static const struct i915_mmio_range gen11_oa_mux_regs[] = { { .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */ {} }; -static const struct i915_range gen12_oa_mux_regs[] = { +static const struct i915_mmio_range gen12_oa_mux_regs[] = { { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */ { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */ { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */ @@ -4410,7 +4399,7 @@ static const struct i915_range gen12_oa_mux_regs[] = { * Ref: 14010536224: * 0x20cc is repurposed on MTL, so use a separate array for MTL. 
*/ -static const struct i915_range mtl_oa_mux_regs[] = { +static const struct i915_mmio_range mtl_oa_mux_regs[] = { { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */ { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */ { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */ @@ -4421,61 +4410,61 @@ static const struct i915_range mtl_oa_mux_regs[] = { static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) { - return reg_in_range_table(addr, gen7_oa_b_counters); + return i915_mmio_range_table_contains(addr, gen7_oa_b_counters); } static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return reg_in_range_table(addr, gen7_oa_mux_regs) || - reg_in_range_table(addr, gen8_oa_mux_regs); + return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) || + i915_mmio_range_table_contains(addr, gen8_oa_mux_regs); } static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return reg_in_range_table(addr, gen7_oa_mux_regs) || - reg_in_range_table(addr, gen8_oa_mux_regs) || - reg_in_range_table(addr, gen11_oa_mux_regs); + return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) || + i915_mmio_range_table_contains(addr, gen8_oa_mux_regs) || + i915_mmio_range_table_contains(addr, gen11_oa_mux_regs); } static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return reg_in_range_table(addr, gen7_oa_mux_regs) || - reg_in_range_table(addr, hsw_oa_mux_regs); + return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) || + i915_mmio_range_table_contains(addr, hsw_oa_mux_regs); } static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return reg_in_range_table(addr, gen7_oa_mux_regs) || - reg_in_range_table(addr, chv_oa_mux_regs); + return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) || + i915_mmio_range_table_contains(addr, chv_oa_mux_regs); } static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) { - return reg_in_range_table(addr, gen12_oa_b_counters); + return i915_mmio_range_table_contains(addr, gen12_oa_b_counters); } static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr) { if (HAS_OAM(perf->i915) && GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70)) - return reg_in_range_table(addr, mtl_oam_b_counters); + return i915_mmio_range_table_contains(addr, mtl_oam_b_counters); return false; } static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) { - return reg_in_range_table(addr, xehp_oa_b_counters) || - reg_in_range_table(addr, gen12_oa_b_counters) || + return i915_mmio_range_table_contains(addr, xehp_oa_b_counters) || + i915_mmio_range_table_contains(addr, gen12_oa_b_counters) || mtl_is_valid_oam_b_counter_addr(perf, addr); } static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70)) - return reg_in_range_table(addr, mtl_oa_mux_regs); + return i915_mmio_range_table_contains(addr, mtl_oa_mux_regs); else - return reg_in_range_table(addr, gen12_oa_mux_regs); + return i915_mmio_range_table_contains(addr, gen12_oa_mux_regs); } static u32 mask_reg_value(u32 reg, u32 val) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 5bc696bfbb0f..a6697db21c72 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -6,6 +6,8 @@ #include <linux/pm_runtime.h> +#include <drm/drm_print.h> + #include "gt/intel_engine.h" #include "gt/intel_engine_pm.h" #include "gt/intel_engine_regs.h" @@ -895,7 +897,7 @@ static ssize_t 
i915_pmu_format_show(struct device *dev, struct i915_str_attribute *eattr; eattr = container_of(attr, struct i915_str_attribute, attr); - return sprintf(buf, "%s\n", eattr->str); + return sysfs_emit(buf, "%s\n", eattr->str); } #define I915_PMU_FORMAT_ATTR(_name, _config) \ @@ -925,7 +927,7 @@ static ssize_t i915_pmu_event_show(struct device *dev, struct i915_ext_attribute *eattr; eattr = container_of(attr, struct i915_ext_attribute, attr); - return sprintf(buf, "config=0x%lx\n", eattr->val); + return sysfs_emit(buf, "config=0x%lx\n", eattr->val); } #define __event(__counter, __name, __unit) \ diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 14d9ec0ed777..0c55fb6e9727 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -6,6 +6,8 @@ #include <linux/nospec.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_perf.h" #include "i915_query.h" diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 354ef75ef6a5..5bf3b4ab2baa 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1233,6 +1233,7 @@ #define OROM_OFFSET_MASK REG_GENMASK(20, 16) #define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700) +#define XE3P_ECC_IMPACTING_DE REG_BIT(12) #define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8) #define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4) #define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0) diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h index bfe98cb9a038..e81fac8ab51b 100644 --- a/drivers/gpu/drm/i915/i915_reg_defs.h +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -174,6 +174,16 @@ */ #define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val)) +/** + * REG_FIELD_MAX() - produce the maximum value representable by a field + * @__mask: shifted mask defining the field's length and position + * + * Local wrapper for FIELD_MAX() to return the maximum bit value that can + * be held in the field specified by @_mask, cast to u32 for consistency + * with other macros. 
+ */ +#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask)) + typedef struct { u32 reg; } i915_reg_t; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index b9a2b2194c8f..4399941236cb 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -31,6 +31,8 @@ #include <linux/sched/signal.h> #include <linux/sched/mm.h> +#include <drm/drm_print.h> + #include "gem/i915_gem_context.h" #include "gt/intel_breadcrumbs.h" #include "gt/intel_context.h" diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index d5b6d8ab31a2..7e0791024282 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -5,6 +5,8 @@ #include <linux/vga_switcheroo.h> +#include <drm/drm_print.h> + #include "display/intel_display_device.h" #include "i915_driver.h" diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 622c66666935..70e0d8615160 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -30,6 +30,8 @@ #include <linux/stat.h> #include <linux/sysfs.h> +#include <drm/drm_print.h> + #include "gt/intel_gt_regs.h" #include "gt/intel_rc6.h" #include "gt/intel_rps.h" diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c index 942345548bc3..d5c6e6605086 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -5,11 +5,11 @@ #include <linux/slab.h> +#include <drm/drm_buddy.h> +#include <drm/drm_print.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_bo.h> -#include <drm/drm_buddy.h> - #include "i915_ttm_buddy_manager.h" #include "i915_gem.h" diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 49f7ed413132..89b920ccbccb 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -6,6 +6,7 @@ #include <linux/device.h> #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include "i915_drv.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index a0c892e4c40d..4f75115b87d6 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -38,8 +38,10 @@ struct drm_i915_private; +#ifndef MISSING_CASE #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ __stringify(x), (long)(x)) +#endif #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) @@ -65,11 +67,13 @@ bool i915_error_injected(void); drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \ }) +#ifndef fetch_and_zero #define fetch_and_zero(ptr) ({ \ typeof(*ptr) __T = *(ptr); \ *(ptr) = (typeof(*ptr))0; \ __T; \ }) +#endif /* * check_user_mbz: Check that a user value exists and is zero @@ -100,43 +104,6 @@ static inline bool is_power_of_2_u64(u64 n) return (n != 0 && ((n & (n - 1)) == 0)); } -static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) -{ - unsigned long j = msecs_to_jiffies(m); - - return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); -} - -/* - * If you need to wait X milliseconds between events A and B, but event B - * doesn't happen exactly after event A, you record the timestamp (jiffies) of - * when event A happened, then just before event B you call this function and - * pass the timestamp as the first argument, and X as the second argument. 
- */ -static inline void -wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) -{ - unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; - - /* - * Don't re-read the value of "jiffies" every time since it may change - * behind our back and break the math. - */ - tmp_jiffies = jiffies; - target_jiffies = timestamp_jiffies + - msecs_to_jiffies_timeout(to_wait_ms); - - if (time_after(target_jiffies, tmp_jiffies)) { - remaining_jiffies = target_jiffies - tmp_jiffies; - while (remaining_jiffies) - remaining_jiffies = - schedule_timeout_uninterruptible(remaining_jiffies); - } -} - -#define KHz(x) (1000 * (x)) -#define MHz(x) KHz(1000 * (x)) - void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint); static inline void __add_taint_for_CI(unsigned int taint) { diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index c97323973f9b..d29a06ea51a5 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -21,6 +21,8 @@ * SOFTWARE. */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_pvinfo.h" #include "i915_vgpu.h" diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 25e97031d76e..2c0a63664e13 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -24,7 +24,9 @@ #include <linux/sched/mm.h> #include <linux/dma-fence-array.h> + #include <drm/drm_gem.h> +#include <drm/drm_print.h> #include "display/intel_fb.h" #include "display/intel_frontbuffer.h" @@ -1595,8 +1597,20 @@ err_unlock: err_vma_res: i915_vma_resource_free(vma_res); err_fence: - if (work) - dma_fence_work_commit_imm(&work->base); + if (work) { + /* + * When pinning VMA to GGTT on CHV or BXT with VTD enabled, + * commit VMA binding asynchronously to avoid risk of lock + * inversion among reservation_ww locks held here and + * cpu_hotplug_lock acquired from stop_machine(), which we + * wrap around GGTT updates when running in those environments. + */ + if (i915_vma_is_ggtt(vma) && + intel_vm_no_concurrent_access_wa(vma->vm->i915)) + dma_fence_work_commit(&work->base); + else + dma_fence_work_commit_imm(&work->base); + } err_rpm: intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); @@ -1990,13 +2004,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma, } if (flags & EXEC_OBJECT_WRITE) { - struct intel_frontbuffer *front; + struct i915_frontbuffer *front; - front = i915_gem_object_get_frontbuffer(obj); + front = i915_gem_object_frontbuffer_lookup(obj); if (unlikely(front)) { - if (intel_frontbuffer_invalidate(front, ORIGIN_CS)) + if (intel_frontbuffer_invalidate(&front->base, ORIGIN_CS)) i915_active_add_request(&front->write, rq); - intel_frontbuffer_put(front); + i915_gem_object_frontbuffer_put(front); } } diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c index 467740969431..175a240ac848 100644 --- a/drivers/gpu/drm/i915/intel_clock_gating.c +++ b/drivers/gpu/drm/i915/intel_clock_gating.c @@ -25,6 +25,8 @@ * */ +#include <drm/drm_print.h> + #include "display/i9xx_plane_regs.h" #include "display/intel_display.h" #include "display/intel_display_core.h" diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index dae9dce7d1b3..c3efc3454ec2 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -21,6 +21,8 @@ * SOFTWARE. 
*/ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_vgpu.h" #include "intel_gvt.h" diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index 59bd603e6deb..ce722f20cab1 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -5,6 +5,7 @@ #include <linux/prandom.h> +#include <drm/drm_print.h> #include <uapi/drm/i915_drm.h> #include "intel_memory_region.h" diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c index 55ffedad2490..756652b8ec97 100644 --- a/drivers/gpu/drm/i915/intel_pcode.c +++ b/drivers/gpu/drm/i915/intel_pcode.c @@ -3,6 +3,8 @@ * Copyright © 2013-2021 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_reg.h" #include "i915_wait_util.h" diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index 04525d92bec5..47a69aad5c3f 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -34,7 +34,7 @@ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv) return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(), drm->dev, drm->anon_inode->i_mapping, - drm->vma_offset_manager, false, false); + drm->vma_offset_manager, 0); } /** diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 7ce3e6de0c19..d11c2814b787 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -29,6 +29,7 @@ #include <linux/pm_runtime.h> #include <drm/drm_print.h> +#include <drm/intel/display_parent_interface.h> #include "i915_drv.h" #include "i915_trace.h" @@ -177,6 +178,82 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm, return track_intel_runtime_pm_wakeref(rpm); } +static struct intel_runtime_pm *drm_to_rpm(const struct drm_device *drm) +{ + struct drm_i915_private *i915 = to_i915(drm); + + return &i915->runtime_pm; +} + +static struct ref_tracker *i915_display_rpm_get(const struct drm_device *drm) +{ + return intel_runtime_pm_get(drm_to_rpm(drm)); +} + +static struct ref_tracker *i915_display_rpm_get_raw(const struct drm_device *drm) +{ + return intel_runtime_pm_get_raw(drm_to_rpm(drm)); +} + +static struct ref_tracker *i915_display_rpm_get_if_in_use(const struct drm_device *drm) +{ + return intel_runtime_pm_get_if_in_use(drm_to_rpm(drm)); +} + +static struct ref_tracker *i915_display_rpm_get_noresume(const struct drm_device *drm) +{ + return intel_runtime_pm_get_noresume(drm_to_rpm(drm)); +} + +static void i915_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref) +{ + intel_runtime_pm_put(drm_to_rpm(drm), wakeref); +} + +static void i915_display_rpm_put_raw(const struct drm_device *drm, struct ref_tracker *wakeref) +{ + intel_runtime_pm_put_raw(drm_to_rpm(drm), wakeref); +} + +static void i915_display_rpm_put_unchecked(const struct drm_device *drm) +{ + intel_runtime_pm_put_unchecked(drm_to_rpm(drm)); +} + +static bool i915_display_rpm_suspended(const struct drm_device *drm) +{ + return intel_runtime_pm_suspended(drm_to_rpm(drm)); +} + +static void i915_display_rpm_assert_held(const struct drm_device *drm) +{ + assert_rpm_wakelock_held(drm_to_rpm(drm)); +} + +static void i915_display_rpm_assert_block(const struct drm_device *drm) +{ + disable_rpm_wakeref_asserts(drm_to_rpm(drm)); +} + +static void i915_display_rpm_assert_unblock(const struct drm_device *drm) +{ + 
enable_rpm_wakeref_asserts(drm_to_rpm(drm)); +} + +const struct intel_display_rpm_interface i915_display_rpm_interface = { + .get = i915_display_rpm_get, + .get_raw = i915_display_rpm_get_raw, + .get_if_in_use = i915_display_rpm_get_if_in_use, + .get_noresume = i915_display_rpm_get_noresume, + .put = i915_display_rpm_put, + .put_raw = i915_display_rpm_put_raw, + .put_unchecked = i915_display_rpm_put_unchecked, + .suspended = i915_display_rpm_suspended, + .assert_held = i915_display_rpm_assert_held, + .assert_block = i915_display_rpm_assert_block, + .assert_unblock = i915_display_rpm_assert_unblock +}; + /** * intel_runtime_pm_get_raw - grab a raw runtime pm reference * @rpm: the intel_runtime_pm structure diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 7428bd8fa67f..ed6c43b17f9a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -14,6 +14,7 @@ struct device; struct drm_i915_private; struct drm_printer; +struct intel_display_rpm_interface; /* * This struct helps tracking the state needed for runtime PM, which puts the @@ -226,4 +227,6 @@ static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, } #endif +extern const struct intel_display_rpm_interface i915_display_rpm_interface; + #endif /* __INTEL_RUNTIME_PM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c index 285b96fadfd5..60a2af5307fc 100644 --- a/drivers/gpu/drm/i915/intel_step.c +++ b/drivers/gpu/drm/i915/intel_step.c @@ -3,6 +3,8 @@ * Copyright © 2020,2021 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "intel_step.h" diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8cb59f8d1f4c..4adeb271fcbf 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -24,6 +24,7 @@ #include <linux/pm_runtime.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "display/intel_display_core.h" #include "gt/intel_engine_regs.h" @@ -35,6 +36,7 @@ #include "i915_reg.h" #include "i915_vgpu.h" #include "i915_wait_util.h" +#include "i915_mmio_range.h" #include "intel_uncore_trace.h" #define FORCEWAKE_ACK_TIMEOUT_MS 50 @@ -999,7 +1001,7 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset) * scanned for obvious mistakes or typos by the selftests. 
*/ -static const struct i915_range gen8_shadowed_regs[] = { +static const struct i915_mmio_range gen8_shadowed_regs[] = { { .start = 0x2030, .end = 0x2030 }, { .start = 0xA008, .end = 0xA00C }, { .start = 0x12030, .end = 0x12030 }, @@ -1007,7 +1009,7 @@ static const struct i915_range gen8_shadowed_regs[] = { { .start = 0x22030, .end = 0x22030 }, }; -static const struct i915_range gen11_shadowed_regs[] = { +static const struct i915_mmio_range gen11_shadowed_regs[] = { { .start = 0x2030, .end = 0x2030 }, { .start = 0x2550, .end = 0x2550 }, { .start = 0xA008, .end = 0xA00C }, @@ -1034,7 +1036,7 @@ static const struct i915_range gen11_shadowed_regs[] = { { .start = 0x1D8510, .end = 0x1D8550 }, }; -static const struct i915_range gen12_shadowed_regs[] = { +static const struct i915_mmio_range gen12_shadowed_regs[] = { { .start = 0x2030, .end = 0x2030 }, { .start = 0x2510, .end = 0x2550 }, { .start = 0xA008, .end = 0xA00C }, @@ -1078,7 +1080,7 @@ static const struct i915_range gen12_shadowed_regs[] = { { .start = 0x1F8510, .end = 0x1F8550 }, }; -static const struct i915_range dg2_shadowed_regs[] = { +static const struct i915_mmio_range dg2_shadowed_regs[] = { { .start = 0x2030, .end = 0x2030 }, { .start = 0x2510, .end = 0x2550 }, { .start = 0xA008, .end = 0xA00C }, @@ -1117,7 +1119,7 @@ static const struct i915_range dg2_shadowed_regs[] = { { .start = 0x1F8510, .end = 0x1F8550 }, }; -static const struct i915_range mtl_shadowed_regs[] = { +static const struct i915_mmio_range mtl_shadowed_regs[] = { { .start = 0x2030, .end = 0x2030 }, { .start = 0x2510, .end = 0x2550 }, { .start = 0xA008, .end = 0xA00C }, @@ -1135,7 +1137,7 @@ static const struct i915_range mtl_shadowed_regs[] = { { .start = 0x22510, .end = 0x22550 }, }; -static const struct i915_range xelpmp_shadowed_regs[] = { +static const struct i915_mmio_range xelpmp_shadowed_regs[] = { { .start = 0x1C0030, .end = 0x1C0030 }, { .start = 0x1C0510, .end = 0x1C0550 }, { .start = 0x1C8030, .end = 0x1C8030 }, @@ -1156,7 +1158,7 @@ static const struct i915_range xelpmp_shadowed_regs[] = { { .start = 0x38CFD4, .end = 0x38CFDC }, }; -static int mmio_range_cmp(u32 key, const struct i915_range *range) +static int mmio_range_cmp(u32 key, const struct i915_mmio_range *range) { if (key < range->start) return -1; diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 6048b99b96cb..fafc2ca9a237 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -123,12 +123,6 @@ struct intel_forcewake_range { enum forcewake_domains domains; }; -/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */ -struct i915_range { - u32 start; - u32 end; -}; - struct intel_uncore { void __iomem *regs; @@ -162,7 +156,7 @@ struct intel_uncore { * Shadowed registers are special cases where we can safely write * to the register *without* grabbing forcewake. 
*/ - const struct i915_range *shadowed_reg_table; + const struct i915_mmio_range *shadowed_reg_table; unsigned int shadowed_reg_table_entries; struct notifier_block pmic_bus_access_nb; diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 7fa194de5d35..b1883dccc22a 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -6,6 +6,8 @@ #include <linux/wait_bit.h> +#include <drm/drm_print.h> + #include "intel_runtime_pm.h" #include "intel_wakeref.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index 27d545c4e6a5..d4b0c76f335b 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -5,6 +5,8 @@ #include <linux/workqueue.h> +#include <drm/drm_print.h> + #include "gem/i915_gem_context.h" #include "gt/intel_context.h" #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c index 75df959b0aa0..2763773e627d 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c @@ -3,6 +3,8 @@ * Copyright(c) 2023 Intel Corporation. */ +#include <drm/drm_print.h> + #include "gem/i915_gem_internal.h" #include "gt/intel_context.h" diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c index 0e609547bef8..9fc575a3d0d5 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c @@ -3,6 +3,8 @@ * Copyright(c) 2021-2022, Intel Corporation. All rights reserved. */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "gem/i915_gem_region.h" diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index 091c86e03d1a..1e63261b620f 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -3,6 +3,8 @@ * Copyright(c) 2020, Intel Corporation. All rights reserved. 
*/ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "intel_pxp.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 0d89d70b9c36..36c3a5460221 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -7,6 +7,8 @@ #include <linux/kref.h> #include <linux/string_helpers.h> +#include <drm/drm_print.h> + #include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 7ab4c4e60264..0a86e4857539 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1118,6 +1118,10 @@ static int misaligned_case(struct i915_address_space *vm, struct intel_memory_re goto err_put; } + /* make sure page_sizes_gtt has been populated before use */ + if (i915_is_ggtt(vm) && intel_vm_no_concurrent_access_wa(vm->i915)) + i915_vma_wait_for_bind(vma); + expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1)); expected_node_size = expected_vma_size; diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 48cd617247d1..1260601bda1f 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -26,6 +26,8 @@ #include <linux/prime_numbers.h> #include <linux/sort.h> +#include <drm/drm_print.h> + #include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gem/selftests/mock_context.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 9c276c9d0a75..8460f0a70d04 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -30,6 +30,7 @@ #include "i915_driver.h" #include "i915_drv.h" +#include "i915_jiffies.h" #include "i915_selftest.h" #include "i915_wait_util.h" #include "igt_flush_test.h" diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 58bcbdcef563..507bf42a1aaf 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -64,7 +64,7 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges, static int intel_shadow_table_check(void) { struct { - const struct i915_range *regs; + const struct i915_mmio_range *regs; unsigned int size; } range_lists[] = { { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) }, @@ -74,7 +74,7 @@ static int intel_shadow_table_check(void) { mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) }, { xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) }, }; - const struct i915_range *range; + const struct i915_mmio_range *range; unsigned int i, j; s32 prev; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index fb8751bd5df0..b59626c4994c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -33,6 +33,7 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "gt/mock_engine.h" +#include "i915_driver.h" #include "intel_memory_region.h" #include "intel_region_ttm.h" @@ -183,7 +184,8 @@ struct drm_i915_private *mock_gem_device(void) /* Set up device info and initial runtime info. 
*/ intel_device_info_driver_create(i915, pdev->device, &mock_info); - display = intel_display_device_probe(pdev); + /* FIXME: Can we run selftests using a mock device without display? */ + display = intel_display_device_probe(pdev, i915_driver_parent_interface()); if (IS_ERR(display)) goto err_device; diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index edffaed8f9a7..3e588762709a 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -6,6 +6,7 @@ #include <linux/string_helpers.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "../display/intel_display_core.h" /* FIXME */ @@ -335,7 +336,7 @@ static bool skl_is_16gb_dimm(const struct dram_dimm_info *dimm) { /* Convert total Gb to Gb per DRAM device */ - return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; + return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) >= 16; } static void @@ -354,7 +355,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915, } drm_dbg_kms(&i915->drm, - "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", + "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb+ DIMMs: %s\n", channel, dimm_name, dimm->size, dimm->width, dimm->ranks, str_yes_no(skl_is_16gb_dimm(dimm))); } @@ -384,7 +385,7 @@ skl_dram_get_channel_info(struct drm_i915_private *i915, ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) || skl_is_16gb_dimm(&ch->dimm_s); - drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n", + drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb+ DIMMs: %s\n", channel, ch->ranks, str_yes_no(ch->is_16gb_dimm)); return 0; @@ -406,7 +407,7 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram u32 val; int ret; - /* Assume 16Gb DIMMs are present until proven otherwise */ + /* Assume 16Gb+ DIMMs are present until proven otherwise */ dram_info->has_16gb_dimms = true; val = intel_uncore_read(&i915->uncore, @@ -438,7 +439,7 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n", str_yes_no(dram_info->symmetric_memory)); - drm_dbg_kms(&i915->drm, "16Gb DIMMs: %s\n", + drm_dbg_kms(&i915->drm, "16Gb+ DIMMs: %s\n", str_yes_no(dram_info->has_16gb_dimms)); return 0; @@ -685,6 +686,7 @@ static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info * static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info) { + struct intel_display *display = i915->display; u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL); switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) { @@ -723,6 +725,9 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val); /* PSF GV points not supported in D14+ */ + if (DISPLAY_VER(display) >= 35) + dram_info->ecc_impacting_de_bw = REG_FIELD_GET(XE3P_ECC_IMPACTING_DE, val); + return 0; } diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h index 03a973f1c941..8475ee379daa 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.h +++ b/drivers/gpu/drm/i915/soc/intel_dram.h @@ -30,6 +30,7 @@ struct dram_info { u8 num_channels; u8 num_qgv_points; u8 num_psf_gv_points; + bool ecc_impacting_de_bw; /* Only valid from Xe3p_LPD onward. 
*/ bool symmetric_memory; bool has_16gb_dimms; }; diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c index f210c9655b53..271da30c8290 100644 --- a/drivers/gpu/drm/i915/soc/intel_gmch.c +++ b/drivers/gpu/drm/i915/soc/intel_gmch.c @@ -8,6 +8,7 @@ #include <linux/vgaarb.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/intel/i915_drm.h> #include "../display/intel_display_core.h" /* FIXME */ diff --git a/drivers/gpu/drm/i915/soc/intel_rom.c b/drivers/gpu/drm/i915/soc/intel_rom.c index 243d98cab8c3..2f17dc856e7f 100644 --- a/drivers/gpu/drm/i915/soc/intel_rom.c +++ b/drivers/gpu/drm/i915/soc/intel_rom.c @@ -39,8 +39,9 @@ static u16 spi_read16(struct intel_rom *rom, loff_t offset) return spi_read32(rom, offset) & 0xffff; } -struct intel_rom *intel_rom_spi(struct drm_i915_private *i915) +struct intel_rom *intel_rom_spi(struct drm_device *drm) { + struct drm_i915_private *i915 = to_i915(drm); struct intel_rom *rom; u32 static_region; @@ -85,7 +86,7 @@ static void pci_free(struct intel_rom *rom) pci_unmap_rom(rom->pdev, rom->oprom); } -struct intel_rom *intel_rom_pci(struct drm_i915_private *i915) +struct intel_rom *intel_rom_pci(struct drm_device *drm) { struct intel_rom *rom; @@ -93,7 +94,7 @@ struct intel_rom *intel_rom_pci(struct drm_i915_private *i915) if (!rom) return NULL; - rom->pdev = to_pci_dev(i915->drm.dev); + rom->pdev = to_pci_dev(drm->dev); rom->oprom = pci_map_rom(rom->pdev, &rom->size); if (!rom->oprom) { diff --git a/drivers/gpu/drm/i915/soc/intel_rom.h b/drivers/gpu/drm/i915/soc/intel_rom.h index fb2979c8ef7f..4e59a375787e 100644 --- a/drivers/gpu/drm/i915/soc/intel_rom.h +++ b/drivers/gpu/drm/i915/soc/intel_rom.h @@ -8,11 +8,11 @@ #include <linux/types.h> -struct drm_i915_private; +struct drm_device; struct intel_rom; -struct intel_rom *intel_rom_spi(struct drm_i915_private *i915); -struct intel_rom *intel_rom_pci(struct drm_i915_private *i915); +struct intel_rom *intel_rom_spi(struct drm_device *drm); +struct intel_rom *intel_rom_pci(struct drm_device *drm); u32 intel_rom_read32(struct intel_rom *rom, loff_t offset); u16 intel_rom_read16(struct intel_rom *rom, loff_t offset); diff --git a/drivers/gpu/drm/i915/vlv_iosf_sb.c b/drivers/gpu/drm/i915/vlv_iosf_sb.c index f4b386933141..38a75651b0dc 100644 --- a/drivers/gpu/drm/i915/vlv_iosf_sb.c +++ b/drivers/gpu/drm/i915/vlv_iosf_sb.c @@ -3,6 +3,8 @@ * Copyright © 2013-2021 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_drv.h" #include "i915_iosf_mbi.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig index 682dd2633d0c..0482bfcefdde 100644 --- a/drivers/gpu/drm/imagination/Kconfig +++ b/drivers/gpu/drm/imagination/Kconfig @@ -7,6 +7,7 @@ config DRM_POWERVR depends on DRM depends on MMU depends on PM + depends on POWER_SEQUENCING || !POWER_SEQUENCING select DRM_EXEC select DRM_GEM_SHMEM_HELPER select DRM_SCHED diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c index 2bbdc05a3b97..9294b4ba1de7 100644 --- a/drivers/gpu/drm/imagination/pvr_ccb.c +++ b/drivers/gpu/drm/imagination/pvr_ccb.c @@ -10,6 +10,7 @@ #include "pvr_power.h" #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/jiffies.h> diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c index 294b6019b415..78d6b8a0a450 100644 --- a/drivers/gpu/drm/imagination/pvr_device.c +++ 
b/drivers/gpu/drm/imagination/pvr_device.c @@ -48,7 +48,7 @@ * * Return: * * 0 on success, or - * * Any error returned by devm_platform_ioremap_resource(). + * * Any error returned by devm_platform_get_and_ioremap_resource(). */ static int pvr_device_reg_init(struct pvr_device *pvr_dev) diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index ab8f56ae15df..ec53ff275541 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -146,6 +146,14 @@ struct pvr_device { */ struct clk *mem_clk; + /** + * @power: Optional power domain devices. + * + * On platforms with more than one power domain for the GPU, they are + * stored here in @domain_devs, along with links between them in + * @domain_links. The size of @domain_devs is given by @domain_count, + * while the size of @domain_links is (2 * @domain_count) - 1. + */ struct pvr_device_power { struct device **domain_devs; struct device_link **domain_links; diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c index b2f8cba77346..779a58fe6ee8 100644 --- a/drivers/gpu/drm/imagination/pvr_fw.c +++ b/drivers/gpu/drm/imagination/pvr_fw.c @@ -17,6 +17,7 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> #include <drm/drm_mm.h> +#include <drm/drm_print.h> #include <linux/clk.h> #include <linux/firmware.h> #include <linux/math.h> diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c index 60db3668ad3c..9ff03bc60a08 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_meta.c +++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c @@ -16,6 +16,8 @@ #include <linux/ktime.h> #include <linux/types.h> +#include <drm/drm_print.h> + #define ROGUE_FW_HEAP_META_SHIFT 25 /* 32 MB */ #define POLL_TIMEOUT_USEC 1000000 diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index a1098b521485..8a56952f6730 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -9,6 +9,7 @@ #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include <linux/build_bug.h> #include <linux/dcache.h> diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index c6e7ff9e935d..b9f801c63260 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -10,6 +10,7 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <linux/cleanup.h> #include <linux/clk.h> #include <linux/interrupt.h> diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 3d97990170bf..48e52c5561be 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -13,6 +13,7 @@ #include <drm/drm_exec.h> #include <drm/drm_gem.h> #include <drm/drm_gpuvm.h> +#include <drm/drm_print.h> #include <linux/bug.h> #include <linux/container_of.h> diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c index 86ecc22d0a55..d42f33d6f3fc 100644 --- a/drivers/gpu/drm/imx/dc/dc-ed.c +++ b/drivers/gpu/drm/imx/dc/dc-ed.c @@ -15,12 +15,12 @@ #include "dc-pe.h" #define PIXENGCFG_STATIC 0x8 -#define POWERDOWN BIT(4) -#define SYNC_MODE BIT(8) -#define SINGLE 0 #define DIV_MASK GENMASK(23, 16) #define DIV(x) FIELD_PREP(DIV_MASK, (x)) #define DIV_RESET 0x80 +#define SYNC_MODE BIT(8) +#define SINGLE 0 +#define POWERDOWN BIT(4) #define PIXENGCFG_DYNAMIC 0xc @@ 
-28,9 +28,9 @@ #define SYNC_TRIGGER BIT(0) #define STATICCONTROL 0x8 +#define PERFCOUNTMODE BIT(12) #define KICK_MODE BIT(8) #define EXTERNAL BIT(8) -#define PERFCOUNTMODE BIT(12) #define CONTROL 0xc #define GAMMAAPPLYENABLE BIT(0) diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c index 7f6c1852bf72..28f372be9247 100644 --- a/drivers/gpu/drm/imx/dc/dc-fg.c +++ b/drivers/gpu/drm/imx/dc/dc-fg.c @@ -56,9 +56,9 @@ #define FGINCTRL 0x5c #define FGINCTRLPANIC 0x60 -#define FGDM_MASK GENMASK(2, 0) -#define ENPRIMALPHA BIT(3) #define ENSECALPHA BIT(4) +#define ENPRIMALPHA BIT(3) +#define FGDM_MASK GENMASK(2, 0) #define FGCCR 0x64 #define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x)) diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c index f94c591c8158..1d8f74babef8 100644 --- a/drivers/gpu/drm/imx/dc/dc-fu.c +++ b/drivers/gpu/drm/imx/dc/dc-fu.c @@ -18,11 +18,11 @@ #define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x)) /* BURSTBUFFERMANAGEMENT */ +#define LINEMODE_MASK BIT(31) #define SETBURSTLENGTH_MASK GENMASK(12, 8) #define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x)) #define SETNUMBUFFERS_MASK GENMASK(7, 0) #define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x)) -#define LINEMODE_MASK BIT(31) /* SOURCEBUFFERATTRIBUTES */ #define BITSPERPIXEL_MASK GENMASK(21, 16) @@ -31,20 +31,20 @@ #define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1) /* SOURCEBUFFERDIMENSION */ -#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x)) #define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x)) +#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x)) /* LAYEROFFSET */ -#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x)) #define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x)) +#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x)) /* CLIPWINDOWOFFSET */ -#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x)) #define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x)) +#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x)) /* CLIPWINDOWDIMENSIONS */ -#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1) #define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1) +#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1) enum dc_linemode { /* diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h index e016e1ea5b4e..f678de3ca8c0 100644 --- a/drivers/gpu/drm/imx/dc/dc-fu.h +++ b/drivers/gpu/drm/imx/dc/dc-fu.h @@ -33,13 +33,13 @@ #define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x)) /* LAYERPROPERTY */ +#define SOURCEBUFFERENABLE BIT(31) #define YUVCONVERSIONMODE_MASK GENMASK(18, 17) #define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x)) -#define SOURCEBUFFERENABLE BIT(31) /* FRAMEDIMENSIONS */ -#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x)) #define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x)) +#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x)) /* CONTROL */ #define INPUTSELECT_MASK GENMASK(4, 3) diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c index 38f966625d38..ca1d714c8d6e 100644 --- a/drivers/gpu/drm/imx/dc/dc-lb.c +++ b/drivers/gpu/drm/imx/dc/dc-lb.c @@ -17,12 +17,12 @@ #include "dc-pe.h" #define PIXENGCFG_DYNAMIC 0x8 -#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0) -#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \ - FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x)) #define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8) #define PIXENGCFG_DYNAMIC_SEC_SEL(x) \ FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x)) +#define 
PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0) +#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \ + FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x)) #define STATICCONTROL 0x8 #define SHDTOKSEL_MASK GENMASK(4, 3) @@ -37,24 +37,24 @@ #define BLENDCONTROL 0x10 #define ALPHA_MASK GENMASK(23, 16) #define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x)) -#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0) -#define PRIM_C_BLD_FUNC(x) \ - FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x)) -#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4) -#define SEC_C_BLD_FUNC(x) \ - FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x)) -#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8) -#define PRIM_A_BLD_FUNC(x) \ - FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x)) #define SEC_A_BLD_FUNC_MASK GENMASK(14, 12) #define SEC_A_BLD_FUNC(x) \ FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x)) +#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8) +#define PRIM_A_BLD_FUNC(x) \ + FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x)) +#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4) +#define SEC_C_BLD_FUNC(x) \ + FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x)) +#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0) +#define PRIM_C_BLD_FUNC(x) \ + FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x)) #define POSITION 0x14 -#define XPOS_MASK GENMASK(15, 0) -#define XPOS(x) FIELD_PREP(XPOS_MASK, (x)) #define YPOS_MASK GENMASK(31, 16) #define YPOS(x) FIELD_PREP(YPOS_MASK, (x)) +#define XPOS_MASK GENMASK(15, 0) +#define XPOS(x) FIELD_PREP(XPOS_MASK, (x)) enum dc_lb_blend_func { DC_LAYERBLEND_BLEND_ZERO, diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c index d8b946fb90de..e40d5d66c5c1 100644 --- a/drivers/gpu/drm/imx/dc/dc-plane.c +++ b/drivers/gpu/drm/imx/dc/dc-plane.c @@ -106,7 +106,7 @@ dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) } crtc_state = - drm_atomic_get_existing_crtc_state(state, plane_state->crtc); + drm_atomic_get_new_crtc_state(state, plane_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c index ab6d32bad756..0b99b407ac0a 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-plane.c +++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c @@ -10,6 +10,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include "dcss-dev.h" #include "dcss-kms.h" @@ -159,8 +160,8 @@ static int dcss_plane_atomic_check(struct drm_plane *plane, dma_obj = drm_fb_dma_get_gem_obj(fb, 0); WARN_ON(!dma_obj); - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); hdisplay = crtc_state->adjusted_mode.hdisplay; vdisplay = crtc_state->adjusted_mode.vdisplay; diff --git a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c index 8333c4bf7369..07e5f96202d4 100644 --- a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c @@ -278,4 +278,3 @@ MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>"); MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>"); MODULE_DESCRIPTION("IMX6 Specific DW-HDMI Driver Extension"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:dwhdmi-imx"); diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c index ec5fd9a01f1e..eddb471119c6 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c @@ -17,7 +17,9 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include 
<drm/drm_dumb_buffers.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> @@ -141,17 +143,34 @@ static int imx_drm_dumb_create(struct drm_file *file_priv, struct drm_device *drm, struct drm_mode_create_dumb *args) { - u32 width = args->width; + u32 fourcc; + u64 pitch_align; int ret; - args->width = ALIGN(width, 8); - - ret = drm_gem_dma_dumb_create(file_priv, drm, args); + /* + * Hardware requires the framebuffer width to be aligned to + * multiples of 8. The mode-setting code handles this, but + * the buffer pitch has to be aligned as well. Set the pitch + * alignment accordingly, so that each scanline fits into + * the allocated buffer. + */ + fourcc = drm_driver_color_mode_format(drm, args->bpp); + if (fourcc != DRM_FORMAT_INVALID) { + const struct drm_format_info *info = drm_format_info(fourcc); + + if (!info) + return -EINVAL; + pitch_align = drm_format_info_min_pitch(info, 0, 8); + } else { + pitch_align = DIV_ROUND_UP(args->bpp, SZ_8) * 8; + } + if (!pitch_align || pitch_align > U32_MAX) + return -EINVAL; + ret = drm_mode_size_dumb(drm, args, pitch_align, 0); if (ret) return ret; - args->width = width; - return ret; + return drm_gem_dma_dumb_create(file_priv, drm, args); } static const struct drm_driver imx_drm_driver = { diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c index 6be7a57ad03d..626d410d9150 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c @@ -644,4 +644,3 @@ module_platform_driver(imx_ldb_driver); MODULE_DESCRIPTION("i.MX LVDS driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c index c5629e155d25..c5c6e070cc06 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c @@ -368,17 +368,20 @@ static unsigned long clk_tve_di_recalc_rate(struct clk_hw *hw, return 0; } -static long clk_tve_di_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int clk_tve_di_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { unsigned long div; - div = *prate / rate; + div = req->best_parent_rate / req->rate; if (div >= 4) - return *prate / 4; + req->rate = req->best_parent_rate / 4; else if (div >= 2) - return *prate / 2; - return *prate; + req->rate = req->best_parent_rate / 2; + else + req->rate = req->best_parent_rate; + + return 0; } static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate, @@ -409,7 +412,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate, } static const struct clk_ops clk_tve_di_ops = { - .round_rate = clk_tve_di_round_rate, + .determine_rate = clk_tve_di_determine_rate, .set_rate = clk_tve_di_set_rate, .recalc_rate = clk_tve_di_recalc_rate, }; @@ -674,4 +677,3 @@ module_platform_driver(imx_tve_driver); MODULE_DESCRIPTION("i.MX Television Encoder driver"); MODULE_AUTHOR("Philipp Zabel, Pengutronix"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-tve"); diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c index 704c549750f9..db50eccea0ca 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c @@ -14,6 +14,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> +#include
<drm/drm_print.h> #include <video/imx-ipu-v3.h> @@ -386,8 +387,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, return -EINVAL; crtc_state = - drm_atomic_get_existing_crtc_state(state, - new_state->crtc); + drm_atomic_get_new_crtc_state(state, new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 7fc6af703307..6fbf505d2801 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -133,10 +133,10 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); struct drm_display_info *di = &conn_state->connector->display_info; struct drm_bridge_state *next_bridge_state = NULL; - struct drm_bridge *next_bridge; u32 bus_flags, bus_fmt; - next_bridge = drm_bridge_get_next_bridge(bridge); + struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge); + if (next_bridge) next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, next_bridge); @@ -286,4 +286,3 @@ module_platform_driver(imx_pd_driver); MODULE_DESCRIPTION("i.MX parallel display driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-parallel-display"); diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c index 8d6a0bb31c48..e200b40f30fe 100644 --- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c +++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c @@ -14,6 +14,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 9db1ceaed518..d3213fbf22be 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -247,8 +247,8 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct ingenic_drm_private_state *priv_state; unsigned int next_id; - priv_state = ingenic_drm_get_priv_state(priv, state); - if (WARN_ON(IS_ERR(priv_state))) + priv_state = ingenic_drm_get_new_priv_state(priv, state); + if (WARN_ON(!priv_state)) return; /* Set addresses of our DMA descriptor chains */ @@ -340,6 +340,7 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, crtc); struct ingenic_drm *priv = drm_crtc_get_priv(crtc); struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL; + struct ingenic_drm_private_state *priv_state; if (crtc_state->gamma_lut && drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) { @@ -347,6 +348,11 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + /* We will need the state in atomic_enable, so let's make sure it's part of the state */ + priv_state = ingenic_drm_get_priv_state(priv, state); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) { f1_state = drm_atomic_get_plane_state(crtc_state->state, &priv->f1); @@ -471,8 +477,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, if (priv->soc_info->plane_f0_not_working && plane == &priv->f0) return -EINVAL; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = 
drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index 26ebf424d63e..32638a713241 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -580,7 +580,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -705,7 +705,7 @@ ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane, ipu->sharpness = val; if (state->crtc) { - crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 32cda134ae3e..7c2eb1152fc2 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -20,6 +20,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c index 9e0562aa2bcb..a935ff1503cd 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -12,6 +12,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "kmb_drv.h" #include "kmb_plane.h" @@ -129,8 +130,7 @@ static int kmb_plane_atomic_check(struct drm_plane *plane, } can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); crtc_state = - drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); + drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); return drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_NO_SCALING, diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 739e8c6c6d90..9a1e6b9ecbe5 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -8,6 +8,8 @@ #include <linux/vmalloc.h> #include <linux/pm_runtime.h> +#include <drm/drm_print.h> + #include "lima_devfreq.h" #include "lima_drv.h" #include "lima_sched.h" diff --git a/drivers/gpu/drm/logicvc/logicvc_layer.c b/drivers/gpu/drm/logicvc/logicvc_layer.c index 464000aea765..eab4d773f92b 100644 --- a/drivers/gpu/drm/logicvc/logicvc_layer.c +++ b/drivers/gpu/drm/logicvc/logicvc_layer.c @@ -96,8 +96,8 @@ static int logicvc_plane_atomic_check(struct drm_plane *drm_plane, if (!new_state->crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(new_state->state, - new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/loongson/lsdc_benchmark.c b/drivers/gpu/drm/loongson/lsdc_benchmark.c index b088646a2ff9..659173381814 100644 --- a/drivers/gpu/drm/loongson/lsdc_benchmark.c +++ b/drivers/gpu/drm/loongson/lsdc_benchmark.c @@ -4,6 +4,7 @@ */ #include <drm/drm_debugfs.h> +#include <drm/drm_print.h> #include "lsdc_benchmark.h" #include "lsdc_drv.h" diff --git a/drivers/gpu/drm/loongson/lsdc_crtc.c b/drivers/gpu/drm/loongson/lsdc_crtc.c index 03958b79f251..a5b7d5c5fd20 100644 --- a/drivers/gpu/drm/loongson/lsdc_crtc.c +++ 
b/drivers/gpu/drm/loongson/lsdc_crtc.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_debugfs.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "lsdc_drv.h" diff --git a/drivers/gpu/drm/loongson/lsdc_debugfs.c b/drivers/gpu/drm/loongson/lsdc_debugfs.c index b9c2e6b1701f..19aa7ef577de 100644 --- a/drivers/gpu/drm/loongson/lsdc_debugfs.c +++ b/drivers/gpu/drm/loongson/lsdc_debugfs.c @@ -4,6 +4,7 @@ */ #include <drm/drm_debugfs.h> +#include <drm/drm_print.h> #include "lsdc_benchmark.h" #include "lsdc_drv.h" diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c index 12193d2a301a..abf5bf68eec2 100644 --- a/drivers/gpu/drm/loongson/lsdc_drv.c +++ b/drivers/gpu/drm/loongson/lsdc_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c index a720d8f53209..6372db2d3093 100644 --- a/drivers/gpu/drm/loongson/lsdc_gem.c +++ b/drivers/gpu/drm/loongson/lsdc_gem.c @@ -6,9 +6,11 @@ #include <linux/dma-buf.h> #include <drm/drm_debugfs.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include "lsdc_drv.h" #include "lsdc_gem.h" @@ -57,7 +59,7 @@ static void lsdc_gem_object_free(struct drm_gem_object *obj) struct ttm_buffer_object *tbo = to_ttm_bo(obj); if (tbo) - ttm_bo_put(tbo); + ttm_bo_fini(tbo); } static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map) @@ -204,45 +206,31 @@ int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev, const struct lsdc_desc *descp = ldev->descp; u32 domain = LSDC_GEM_DOMAIN_VRAM; struct drm_gem_object *gobj; - size_t size; - u32 pitch; - u32 handle; int ret; - if (!args->width || !args->height) - return -EINVAL; - - if (args->bpp != 32 && args->bpp != 16) - return -EINVAL; - - pitch = args->width * args->bpp / 8; - pitch = ALIGN(pitch, descp->pitch_align); - size = pitch * args->height; - size = ALIGN(size, PAGE_SIZE); + ret = drm_mode_size_dumb(ddev, args, descp->pitch_align, 0); + if (ret) + return ret; /* Maximum single bo size allowed is the half vram size available */ - if (size > ldev->vram_size / 2) { - drm_err(ddev, "Requesting(%zuMiB) failed\n", size >> 20); + if (args->size > ldev->vram_size / 2) { + drm_err(ddev, "Requesting(%zuMiB) failed\n", (size_t)(args->size >> PAGE_SHIFT)); return -ENOMEM; } - gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL); + gobj = lsdc_gem_object_create(ddev, domain, args->size, false, NULL, NULL); if (IS_ERR(gobj)) { drm_err(ddev, "Failed to create gem object\n"); return PTR_ERR(gobj); } - ret = drm_gem_handle_create(file, gobj, &handle); + ret = drm_gem_handle_create(file, gobj, &args->handle); /* drop reference from allocate, handle holds it now */ drm_gem_object_put(gobj); if (ret) return ret; - args->pitch = pitch; - args->size = size; - args->handle = handle; - return 0; } diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.c b/drivers/gpu/drm/loongson/lsdc_i2c.c index ce90c25536d2..012b4761c538 100644 --- a/drivers/gpu/drm/loongson/lsdc_i2c.c +++ b/drivers/gpu/drm/loongson/lsdc_i2c.c @@ -4,6 +4,7 @@ */ #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "lsdc_drv.h" #include "lsdc_output.h" diff --git 
a/drivers/gpu/drm/loongson/lsdc_irq.c b/drivers/gpu/drm/loongson/lsdc_irq.c index efdc4d10792d..e8b7cc327f04 100644 --- a/drivers/gpu/drm/loongson/lsdc_irq.c +++ b/drivers/gpu/drm/loongson/lsdc_irq.c @@ -3,6 +3,7 @@ * Copyright (C) 2023 Loongson Technology Corporation Limited */ +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "lsdc_irq.h" diff --git a/drivers/gpu/drm/loongson/lsdc_output_7a1000.c b/drivers/gpu/drm/loongson/lsdc_output_7a1000.c index 600ed4fb0884..ccca67e01fd9 100644 --- a/drivers/gpu/drm/loongson/lsdc_output_7a1000.c +++ b/drivers/gpu/drm/loongson/lsdc_output_7a1000.c @@ -5,6 +5,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "lsdc_drv.h" diff --git a/drivers/gpu/drm/loongson/lsdc_output_7a2000.c b/drivers/gpu/drm/loongson/lsdc_output_7a2000.c index 2bd797a9b9ff..aa7daee4c065 100644 --- a/drivers/gpu/drm/loongson/lsdc_output_7a2000.c +++ b/drivers/gpu/drm/loongson/lsdc_output_7a2000.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "lsdc_drv.h" diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.c b/drivers/gpu/drm/loongson/lsdc_pixpll.c index 2609a2256da4..51b9a032cf43 100644 --- a/drivers/gpu/drm/loongson/lsdc_pixpll.c +++ b/drivers/gpu/drm/loongson/lsdc_pixpll.c @@ -6,6 +6,7 @@ #include <linux/delay.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "lsdc_drv.h" diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c index aa9a97f9c4dc..9675344128d0 100644 --- a/drivers/gpu/drm/loongson/lsdc_plane.c +++ b/drivers/gpu/drm/loongson/lsdc_plane.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include "lsdc_drv.h" #include "lsdc_regs.h" @@ -196,7 +197,7 @@ static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane, return -EINVAL; } - crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); if (!crtc_state->active) return -EINVAL; diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c index 2e42c6970c9f..5d9075634bf8 100644 --- a/drivers/gpu/drm/loongson/lsdc_ttm.c +++ b/drivers/gpu/drm/loongson/lsdc_ttm.c @@ -8,6 +8,7 @@ #include <drm/drm_gem.h> #include <drm/drm_managed.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include "lsdc_drv.h" #include "lsdc_ttm.h" @@ -544,7 +545,8 @@ int lsdc_ttm_init(struct lsdc_device *ldev) ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev, ddev->anon_inode->i_mapping, - ddev->vma_offset_manager, false, true); + ddev->vma_offset_manager, + TTM_ALLOCATION_POOL_USE_DMA32); if (ret) return ret; diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c index 3056ac566473..8c5af2677357 100644 --- a/drivers/gpu/drm/mcde/mcde_clk_div.c +++ b/drivers/gpu/drm/mcde/mcde_clk_div.c @@ -71,12 +71,15 @@ static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, return best_div; } -static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int mcde_clk_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - int div = mcde_clk_div_choose_div(hw, rate, prate, true); + int div = mcde_clk_div_choose_div(hw, req->rate, 
+ &req->best_parent_rate, true); - return DIV_ROUND_UP_ULL(*prate, div); + req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div); + + return 0; } static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw, @@ -132,7 +135,7 @@ static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops mcde_clk_div_ops = { .enable = mcde_clk_div_enable, .recalc_rate = mcde_clk_div_recalc_rate, - .round_rate = mcde_clk_div_round_rate, + .determine_rate = mcde_clk_div_determine_rate, .set_rate = mcde_clk_div_set_rate, }; diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 52043a12a2e8..257a6e84dd58 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -17,6 +17,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_mipi_dsi.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index e47debd60619..96188bf9274a 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -30,9 +30,30 @@ config DRM_MEDIATEK_DP help DRM/KMS Display Port driver for MediaTek SoCs. +config DRM_MEDIATEK_HDMI_COMMON + tristate + depends on DRM_MEDIATEK + select DRM_DISPLAY_HDMI_HELPER + select DRM_DISPLAY_HELPER + select SND_SOC_HDMI_CODEC if SND_SOC + help + MediaTek SoC HDMI common library + config DRM_MEDIATEK_HDMI tristate "DRM HDMI Support for Mediatek SoCs" depends on DRM_MEDIATEK - select SND_SOC_HDMI_CODEC if SND_SOC + select DRM_MEDIATEK_HDMI_COMMON help DRM/KMS HDMI driver for Mediatek SoCs + +config DRM_MEDIATEK_HDMI_V2 + tristate "DRM HDMI v2 IP support for MediaTek SoCs" + depends on DRM_MEDIATEK + select DRM_MEDIATEK_HDMI_COMMON + help + Say yes here to enable support for the HDMIv2 IP and related + DDCv2 as found in the MediaTek MT8195, MT8188 SoCs and other + variants. + This driver can also be built as a module. If so, the HDMIv2 + module will be called "mtk_hdmi_v2", and the DDCv2 module + will be called "mtk_hdmi_ddc_v2". 
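For illustration, a configuration sketch inferred from the Kconfig entries above and the Makefile change below (not part of the series itself): enabling CONFIG_DRM_MEDIATEK=m together with CONFIG_DRM_MEDIATEK_HDMI_V2=m pulls in DRM_MEDIATEK_HDMI_COMMON automatically through its select, and a modular build then produces the mtk_hdmi_common, mtk_hdmi_v2 and mtk_hdmi_ddc_v2 modules named in the help text.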
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index 43afd0a26d14..e0ac49b07d50 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -21,8 +21,11 @@ mediatek-drm-y := mtk_crtc.o \ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o +obj-$(CONFIG_DRM_MEDIATEK_HDMI_COMMON) += mtk_hdmi_common.o obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_cec.o obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi.o obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi_ddc.o +obj-$(CONFIG_DRM_MEDIATEK_HDMI_V2) += mtk_hdmi_v2.o +obj-$(CONFIG_DRM_MEDIATEK_HDMI_V2) += mtk_hdmi_ddc_v2.o obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index bc7527542fdc..991cdb3d7d5f 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -16,6 +16,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -283,6 +284,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) unsigned int i; unsigned long flags; + /* release GCE HW usage and start autosuspend */ + pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev); + pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev); + if (data->sta < 0) return; @@ -618,6 +623,9 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) mtk_crtc->config_updating = false; spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0) + goto update_config_out; + mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); goto update_config_out; diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index ac6620e10262..9672ea1f91a2 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -621,15 +621,27 @@ int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev) return ret; } -int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, +static void mtk_ddp_comp_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + +static void mtk_ddp_comp_clk_put(void *_clk) +{ + struct clk *clk = _clk; + + clk_put(clk); +} + +int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp, unsigned int comp_id) { struct platform_device *comp_pdev; enum mtk_ddp_comp_type type; struct mtk_ddp_comp_dev *priv; -#if IS_REACHABLE(CONFIG_MTK_CMDQ) int ret; -#endif if (comp_id >= DDP_COMPONENT_DRM_ID_MAX) return -EINVAL; @@ -651,6 +663,10 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, } comp->dev = &comp_pdev->dev; + ret = devm_add_action_or_reset(dev, mtk_ddp_comp_put_device, comp->dev); + if (ret) + return ret; + if (type == MTK_DISP_AAL || type == MTK_DISP_BLS || type == MTK_DISP_CCORR || @@ -666,15 +682,22 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, type == MTK_DSI) return 0; - priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - priv->regs = of_iomap(node, 0); + priv->regs = devm_of_iomap(dev, node, 0, NULL); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + priv->clk = of_clk_get(node, 0); if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); + ret = devm_add_action_or_reset(dev, 
mtk_ddp_comp_clk_put, priv->clk); + if (ret) + return ret; + #if IS_REACHABLE(CONFIG_MTK_CMDQ) ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0); if (ret) diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index 7289b3dcf22f..3f3d43f4330d 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -350,7 +350,7 @@ static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp) int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev); -int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp, +int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, struct mtk_ddp_comp *comp, unsigned int comp_id); enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id); void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c index 10d60d2c2a56..6d7bf4afa78d 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c @@ -80,27 +80,6 @@ void mtk_ccorr_stop(struct device *dev) writel_relaxed(0x0, ccorr->regs + DISP_CCORR_EN); } -/* Converts a DRM S31.32 value to the HW S1.n format. */ -static u16 mtk_ctm_s31_32_to_s1_n(u64 in, u32 n) -{ - u16 r; - - /* Sign bit. */ - r = in & BIT_ULL(63) ? BIT(n + 1) : 0; - - if ((in & GENMASK_ULL(62, 33)) > 0) { - /* identity value 0x100000000 -> 0x400(mt8183), */ - /* identity value 0x100000000 -> 0x800(mt8192), */ - /* if bigger this, set it to max 0x7ff. */ - r |= GENMASK(n, 0); - } else { - /* take the n+1 most important bits. 
*/ - r |= (in >> (32 - n)) & GENMASK(n, 0); - } - - return r; -} - void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state) { struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev); @@ -119,7 +98,7 @@ void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state) input = ctm->matrix; for (i = 0; i < ARRAY_SIZE(coeffs); i++) - coeffs[i] = mtk_ctm_s31_32_to_s1_n(input[i], matrix_bits); + coeffs[i] = drm_color_ctm_s31_32_to_qm_n(input[i], 2, matrix_bits); mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1], &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_0); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index fe97bb97e004..c0af3e3b51d5 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -527,6 +527,13 @@ bool mtk_ovl_adaptor_is_comp_present(struct device_node *node) type == OVL_ADAPTOR_TYPE_PADDING; } +static void ovl_adaptor_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match) { struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); @@ -560,6 +567,11 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma if (!comp_pdev) return -EPROBE_DEFER; + ret = devm_add_action_or_reset(dev, ovl_adaptor_put_device, + &comp_pdev->dev); + if (ret) + return ret; + priv->ovl_adaptor_comp[id] = &comp_pdev->dev; drm_of_component_match_add(dev, match, component_compare_of, node); diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index bef6eeb30d3e..b0b1e158600f 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2087,6 +2087,7 @@ static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp, endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1); len = of_property_count_elems_of_size(endpoint, "data-lanes", sizeof(u32)); + of_node_put(endpoint); if (len < 0 || len > 4 || len == 3) { dev_err(dev, "invalid data lane size: %d\n", len); return -EINVAL; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 31ff2922758a..a94c51a83261 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -1123,7 +1123,7 @@ static int mtk_drm_probe(struct platform_device *pdev) (void *)private->mmsys_dev, sizeof(*private->mmsys_dev)); private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR].dev = &ovl_adaptor->dev; - mtk_ddp_comp_init(NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR], + mtk_ddp_comp_init(dev, NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR], DDP_COMPONENT_DRM_OVL_ADAPTOR); component_match_add(dev, &match, compare_dev, &ovl_adaptor->dev); } @@ -1189,7 +1189,7 @@ static int mtk_drm_probe(struct platform_device *pdev) node); } - ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id); + ret = mtk_ddp_comp_init(dev, node, &private->ddp_comp[comp_id], comp_id); if (ret) { of_node_put(node); goto err_node; diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c index a172456d1d7b..024cc7e9036c 100644 --- a/drivers/gpu/drm/mediatek/mtk_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_gem.c @@ -11,6 +11,7 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include "mtk_drm_drv.h" #include "mtk_gem.h" diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c 
b/drivers/gpu/drm/mediatek/mtk_hdmi.c index b766dd5e6c8d..0face4dcaa36 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -31,6 +31,7 @@ #include <drm/drm_probe_helper.h> #include "mtk_cec.h" +#include "mtk_hdmi_common.h" #include "mtk_hdmi_regs.h" #define NCTS_BYTES 7 @@ -43,143 +44,6 @@ enum mtk_hdmi_clk_id { MTK_HDMI_CLK_COUNT }; -enum hdmi_aud_input_type { - HDMI_AUD_INPUT_I2S = 0, - HDMI_AUD_INPUT_SPDIF, -}; - -enum hdmi_aud_i2s_fmt { - HDMI_I2S_MODE_RJT_24BIT = 0, - HDMI_I2S_MODE_RJT_16BIT, - HDMI_I2S_MODE_LJT_24BIT, - HDMI_I2S_MODE_LJT_16BIT, - HDMI_I2S_MODE_I2S_24BIT, - HDMI_I2S_MODE_I2S_16BIT -}; - -enum hdmi_aud_mclk { - HDMI_AUD_MCLK_128FS, - HDMI_AUD_MCLK_192FS, - HDMI_AUD_MCLK_256FS, - HDMI_AUD_MCLK_384FS, - HDMI_AUD_MCLK_512FS, - HDMI_AUD_MCLK_768FS, - HDMI_AUD_MCLK_1152FS, -}; - -enum hdmi_aud_channel_type { - HDMI_AUD_CHAN_TYPE_1_0 = 0, - HDMI_AUD_CHAN_TYPE_1_1, - HDMI_AUD_CHAN_TYPE_2_0, - HDMI_AUD_CHAN_TYPE_2_1, - HDMI_AUD_CHAN_TYPE_3_0, - HDMI_AUD_CHAN_TYPE_3_1, - HDMI_AUD_CHAN_TYPE_4_0, - HDMI_AUD_CHAN_TYPE_4_1, - HDMI_AUD_CHAN_TYPE_5_0, - HDMI_AUD_CHAN_TYPE_5_1, - HDMI_AUD_CHAN_TYPE_6_0, - HDMI_AUD_CHAN_TYPE_6_1, - HDMI_AUD_CHAN_TYPE_7_0, - HDMI_AUD_CHAN_TYPE_7_1, - HDMI_AUD_CHAN_TYPE_3_0_LRS, - HDMI_AUD_CHAN_TYPE_3_1_LRS, - HDMI_AUD_CHAN_TYPE_4_0_CLRS, - HDMI_AUD_CHAN_TYPE_4_1_CLRS, - HDMI_AUD_CHAN_TYPE_6_1_CS, - HDMI_AUD_CHAN_TYPE_6_1_CH, - HDMI_AUD_CHAN_TYPE_6_1_OH, - HDMI_AUD_CHAN_TYPE_6_1_CHR, - HDMI_AUD_CHAN_TYPE_7_1_LH_RH, - HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR, - HDMI_AUD_CHAN_TYPE_7_1_LC_RC, - HDMI_AUD_CHAN_TYPE_7_1_LW_RW, - HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD, - HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS, - HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS, - HDMI_AUD_CHAN_TYPE_7_1_CS_CH, - HDMI_AUD_CHAN_TYPE_7_1_CS_OH, - HDMI_AUD_CHAN_TYPE_7_1_CS_CHR, - HDMI_AUD_CHAN_TYPE_7_1_CH_OH, - HDMI_AUD_CHAN_TYPE_7_1_CH_CHR, - HDMI_AUD_CHAN_TYPE_7_1_OH_CHR, - HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR, - HDMI_AUD_CHAN_TYPE_6_0_CS, - HDMI_AUD_CHAN_TYPE_6_0_CH, - HDMI_AUD_CHAN_TYPE_6_0_OH, - HDMI_AUD_CHAN_TYPE_6_0_CHR, - HDMI_AUD_CHAN_TYPE_7_0_LH_RH, - HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR, - HDMI_AUD_CHAN_TYPE_7_0_LC_RC, - HDMI_AUD_CHAN_TYPE_7_0_LW_RW, - HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD, - HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS, - HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS, - HDMI_AUD_CHAN_TYPE_7_0_CS_CH, - HDMI_AUD_CHAN_TYPE_7_0_CS_OH, - HDMI_AUD_CHAN_TYPE_7_0_CS_CHR, - HDMI_AUD_CHAN_TYPE_7_0_CH_OH, - HDMI_AUD_CHAN_TYPE_7_0_CH_CHR, - HDMI_AUD_CHAN_TYPE_7_0_OH_CHR, - HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR, - HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS, - HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF -}; - -enum hdmi_aud_channel_swap_type { - HDMI_AUD_SWAP_LR, - HDMI_AUD_SWAP_LFE_CC, - HDMI_AUD_SWAP_LSRS, - HDMI_AUD_SWAP_RLS_RRS, - HDMI_AUD_SWAP_LR_STATUS, -}; - -struct hdmi_audio_param { - enum hdmi_audio_coding_type aud_codec; - enum hdmi_audio_sample_size aud_sample_size; - enum hdmi_aud_input_type aud_input_type; - enum hdmi_aud_i2s_fmt aud_i2s_fmt; - enum hdmi_aud_mclk aud_mclk; - enum hdmi_aud_channel_type aud_input_chan_type; - struct hdmi_codec_params codec_params; -}; - -struct mtk_hdmi_conf { - bool tz_disabled; - bool cea_modes_only; - unsigned long max_mode_clock; -}; - -struct mtk_hdmi { - struct drm_bridge bridge; - struct drm_bridge *next_bridge; - struct drm_connector *curr_conn;/* current connector (only valid when 'enabled') */ - struct device *dev; - const struct mtk_hdmi_conf *conf; - struct phy *phy; - struct device *cec_dev; - struct i2c_adapter *ddc_adpt; - struct clk *clk[MTK_HDMI_CLK_COUNT]; - struct 
drm_display_mode mode; - bool dvi_mode; - struct regmap *sys_regmap; - unsigned int sys_offset; - struct regmap *regs; - struct platform_device *audio_pdev; - struct hdmi_audio_param aud_param; - bool audio_enable; - bool powered; - bool enabled; - hdmi_codec_plugged_cb plugged_cb; - struct device *codec_dev; - struct mutex update_plugged_status_lock; -}; - -static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b) -{ - return container_of(b, struct mtk_hdmi, bridge); -} - static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black) { regmap_update_bits(hdmi->regs, VIDEO_CFG_4, @@ -600,88 +464,6 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi, regmap_write(hdmi->regs, GRL_CFG5, val); } -struct hdmi_acr_n { - unsigned int clock; - unsigned int n[3]; -}; - -/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */ -static const struct hdmi_acr_n hdmi_rec_n_table[] = { - /* Clock, N: 32kHz 44.1kHz 48kHz */ - { 25175, { 4576, 7007, 6864 } }, - { 74176, { 11648, 17836, 11648 } }, - { 148352, { 11648, 8918, 5824 } }, - { 296703, { 5824, 4459, 5824 } }, - { 297000, { 3072, 4704, 5120 } }, - { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */ -}; - -/** - * hdmi_recommended_n() - Return N value recommended by HDMI specification - * @freq: audio sample rate in Hz - * @clock: rounded TMDS clock in kHz - */ -static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock) -{ - const struct hdmi_acr_n *recommended; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) { - if (clock == hdmi_rec_n_table[i].clock) - break; - } - recommended = hdmi_rec_n_table + i; - - switch (freq) { - case 32000: - return recommended->n[0]; - case 44100: - return recommended->n[1]; - case 48000: - return recommended->n[2]; - case 88200: - return recommended->n[1] * 2; - case 96000: - return recommended->n[2] * 2; - case 176400: - return recommended->n[1] * 4; - case 192000: - return recommended->n[2] * 4; - default: - return (128 * freq) / 1000; - } -} - -static unsigned int hdmi_mode_clock_to_hz(unsigned int clock) -{ - switch (clock) { - case 25175: - return 25174825; /* 25.2/1.001 MHz */ - case 74176: - return 74175824; /* 74.25/1.001 MHz */ - case 148352: - return 148351648; /* 148.5/1.001 MHz */ - case 296703: - return 296703297; /* 297/1.001 MHz */ - default: - return clock * 1000; - } -} - -static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate, - unsigned int tmds_clock, unsigned int n) -{ - return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n, - 128 * audio_sample_rate); -} - -static void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock, - unsigned int *n, unsigned int *cts) -{ - *n = hdmi_recommended_n(sample_rate, clock); - *cts = hdmi_expected_cts(sample_rate, clock, *n); -} - static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n, unsigned int cts) { @@ -1072,20 +854,6 @@ static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = { [MTK_HDMI_CLK_AUD_SPDIF] = "spdif", }; -static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi, - struct device_node *np) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) { - hdmi->clk[i] = of_clk_get_by_name(np, - mtk_hdmi_clk_names[i]); - if (IS_ERR(hdmi->clk[i])) - return PTR_ERR(hdmi->clk[i]); - } - return 0; -} - static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi) { int ret; @@ -1230,13 +998,6 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge, return 0; } -static bool 
mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) { @@ -1268,28 +1029,6 @@ static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge, hdmi->powered = false; } -static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) -{ - struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); - - dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n", - adjusted_mode->name, adjusted_mode->hdisplay); - dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d", - adjusted_mode->hsync_start, adjusted_mode->hsync_end, - adjusted_mode->htotal); - dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n", - adjusted_mode->hskew, adjusted_mode->vdisplay); - dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d", - adjusted_mode->vsync_start, adjusted_mode->vsync_end, - adjusted_mode->vtotal); - dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n", - adjusted_mode->vscan, adjusted_mode->flags); - - drm_mode_copy(&hdmi->mode, adjusted_mode); -} - static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { @@ -1345,169 +1084,10 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = { .edid_read = mtk_hdmi_bridge_edid_read, }; -static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np) -{ - struct platform_device *cec_pdev; - struct device_node *cec_np; - int ret; - - ret = mtk_hdmi_get_all_clk(hdmi, np); - if (ret) - return dev_err_probe(dev, ret, "Failed to get clocks\n"); - - /* The CEC module handles HDMI hotplug detection */ - cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec"); - if (!cec_np) - return dev_err_probe(dev, -EINVAL, "Failed to find CEC node\n"); - - cec_pdev = of_find_device_by_node(cec_np); - if (!cec_pdev) { - dev_err(hdmi->dev, "Waiting for CEC device %pOF\n", - cec_np); - of_node_put(cec_np); - return -EPROBE_DEFER; - } - of_node_put(cec_np); - - /* - * The mediatek,syscon-hdmi property contains a phandle link to the - * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG - * registers it contains. 
- */ - hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi", - 1, &hdmi->sys_offset); - if (IS_ERR(hdmi->sys_regmap)) - return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap), - "Failed to get system configuration registers\n"); - - hdmi->cec_dev = &cec_pdev->dev; - return 0; -} - -static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, - struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - struct device_node *remote, *i2c_np; - int ret; - - ret = mtk_hdmi_get_all_clk(hdmi, np); - if (ret) - return dev_err_probe(dev, ret, "Failed to get clocks\n"); - - hdmi->regs = device_node_to_regmap(dev->of_node); - if (IS_ERR(hdmi->regs)) - return PTR_ERR(hdmi->regs); - - remote = of_graph_get_remote_node(np, 1, 0); - if (!remote) - return -EINVAL; - - if (!of_device_is_compatible(remote, "hdmi-connector")) { - hdmi->next_bridge = of_drm_find_bridge(remote); - if (!hdmi->next_bridge) { - dev_err(dev, "Waiting for external bridge\n"); - of_node_put(remote); - return -EPROBE_DEFER; - } - } - - i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0); - of_node_put(remote); - if (!i2c_np) - return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n"); - - hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); - of_node_put(i2c_np); - if (!hdmi->ddc_adpt) - return dev_err_probe(dev, -EINVAL, "Failed to get ddc i2c adapter by node\n"); - - ret = mtk_hdmi_get_cec_dev(hdmi, dev, np); - if (ret) - return ret; - - return 0; -} - /* * HDMI audio codec callbacks */ -static int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi, - struct hdmi_codec_daifmt *daifmt, - struct hdmi_codec_params *params) -{ - struct hdmi_audio_param aud_params = { 0 }; - unsigned int chan = params->cea.channels; - - dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__, - params->sample_rate, params->sample_width, chan); - - if (!hdmi->bridge.encoder) - return -ENODEV; - - switch (chan) { - case 2: - aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0; - break; - case 4: - aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0; - break; - case 6: - aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1; - break; - case 8: - aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1; - break; - default: - dev_err(hdmi->dev, "channel[%d] not supported!\n", chan); - return -EINVAL; - } - - switch (params->sample_rate) { - case 32000: - case 44100: - case 48000: - case 88200: - case 96000: - case 176400: - case 192000: - break; - default: - dev_err(hdmi->dev, "rate[%d] not supported!\n", - params->sample_rate); - return -EINVAL; - } - - switch (daifmt->fmt) { - case HDMI_I2S: - aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; - aud_params.aud_input_type = HDMI_AUD_INPUT_I2S; - aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; - aud_params.aud_mclk = HDMI_AUD_MCLK_128FS; - break; - case HDMI_SPDIF: - aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; - aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF; - break; - default: - dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__, - daifmt->fmt); - return -EINVAL; - } - memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params)); - memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params)); - - dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n", - aud_params.aud_codec, aud_params.aud_input_type, - aud_params.aud_input_chan_type, 
aud_params.codec_params.sample_rate); - - return 0; -} - static int mtk_hdmi_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) @@ -1555,26 +1135,6 @@ mtk_hdmi_audio_mute(struct device *dev, void *data, return 0; } -static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) -{ - struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - - if (hdmi->enabled) - memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len)); - else - memset(buf, 0, len); - return 0; -} - -static void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn, - struct device *codec_dev) -{ - mutex_lock(&hdmi->update_plugged_status_lock); - hdmi->plugged_cb = fn; - hdmi->codec_dev = codec_dev; - mutex_unlock(&hdmi->update_plugged_status_lock); -} - static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data, hdmi_codec_plugged_cb fn, struct device *codec_dev) @@ -1596,92 +1156,21 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb, }; -static void mtk_hdmi_unregister_audio_driver(void *data) -{ - platform_device_unregister(data); -} - -static int mtk_hdmi_register_audio_driver(struct device *dev) -{ - struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - struct hdmi_audio_param *aud_param = &hdmi->aud_param; - struct hdmi_codec_pdata codec_data = { - .ops = &mtk_hdmi_audio_codec_ops, - .max_i2s_channels = 2, - .i2s = 1, - .data = hdmi, - .no_capture_mute = 1, - }; - int ret; - - aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; - aud_param->aud_input_type = HDMI_AUD_INPUT_I2S; - aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; - aud_param->aud_mclk = HDMI_AUD_MCLK_128FS; - aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0; - - hdmi->audio_pdev = platform_device_register_data(dev, - HDMI_CODEC_DRV_NAME, - PLATFORM_DEVID_AUTO, - &codec_data, - sizeof(codec_data)); - if (IS_ERR(hdmi->audio_pdev)) - return PTR_ERR(hdmi->audio_pdev); - - ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver, - hdmi->audio_pdev); - if (ret) - return ret; - - return 0; -} - static int mtk_hdmi_probe(struct platform_device *pdev) { struct mtk_hdmi *hdmi; - struct device *dev = &pdev->dev; int ret; - hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge, - &mtk_hdmi_bridge_funcs); + hdmi = mtk_hdmi_common_probe(pdev); if (IS_ERR(hdmi)) return PTR_ERR(hdmi); - hdmi->dev = dev; - hdmi->conf = of_device_get_match_data(dev); - - ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev); - if (ret) - return ret; - - hdmi->phy = devm_phy_get(dev, "hdmi"); - if (IS_ERR(hdmi->phy)) - return dev_err_probe(dev, PTR_ERR(hdmi->phy), - "Failed to get HDMI PHY\n"); - - mutex_init(&hdmi->update_plugged_status_lock); - platform_set_drvdata(pdev, hdmi); - - ret = mtk_hdmi_register_audio_driver(dev); - if (ret) - return dev_err_probe(dev, ret, - "Failed to register audio driver\n"); - - hdmi->bridge.of_node = pdev->dev.of_node; - hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID - | DRM_BRIDGE_OP_HPD; - hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; - hdmi->bridge.vendor = "MediaTek"; - hdmi->bridge.product = "On-Chip HDMI"; - - ret = devm_drm_bridge_add(dev, &hdmi->bridge); - if (ret) - return dev_err_probe(dev, ret, "Failed to add bridge\n"); + if (!hdmi->cec_dev) + return dev_err_probe(hdmi->dev, -ENODEV, "CEC is required by HDMIv1\n"); ret = mtk_hdmi_clk_enable_audio(hdmi); if (ret) - 
return dev_err_probe(dev, ret, + return dev_err_probe(hdmi->dev, ret, "Failed to enable audio clocks\n"); return 0; @@ -1712,19 +1201,32 @@ static __maybe_unused int mtk_hdmi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops, mtk_hdmi_suspend, mtk_hdmi_resume); +static const struct mtk_hdmi_ver_conf mtk_hdmi_v1_ver_conf = { + .bridge_funcs = &mtk_hdmi_bridge_funcs, + .codec_ops = &mtk_hdmi_audio_codec_ops, + .mtk_hdmi_clock_names = mtk_hdmi_clk_names, + .num_clocks = ARRAY_SIZE(mtk_hdmi_clk_names) +}; + static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = { .tz_disabled = true, + .ver_conf = &mtk_hdmi_v1_ver_conf }; static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = { - .max_mode_clock = 148500, .cea_modes_only = true, + .max_mode_clock = 148500, + .ver_conf = &mtk_hdmi_v1_ver_conf +}; + +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8173 = { + .ver_conf = &mtk_hdmi_v1_ver_conf }; static const struct of_device_id mtk_hdmi_of_ids[] = { { .compatible = "mediatek,mt2701-hdmi", .data = &mtk_hdmi_conf_mt2701 }, { .compatible = "mediatek,mt8167-hdmi", .data = &mtk_hdmi_conf_mt8167 }, - { .compatible = "mediatek,mt8173-hdmi" }, + { .compatible = "mediatek,mt8173-hdmi", .data = &mtk_hdmi_conf_mt8173 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids); @@ -1744,3 +1246,4 @@ MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>"); MODULE_DESCRIPTION("MediaTek HDMI Driver"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS("DRM_MTK_HDMI_V1"); +MODULE_IMPORT_NS("DRM_MTK_HDMI"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_common.c b/drivers/gpu/drm/mediatek/mtk_hdmi_common.c new file mode 100644 index 000000000000..e78eb0876f16 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_common.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014 MediaTek Inc. + * Copyright (c) 2024 Collabora Ltd. 
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include <drm/drm_modes.h> +#include <linux/device.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/math.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/mfd/syscon.h> +#include <sound/hdmi-codec.h> + +#include "mtk_hdmi_common.h" + +struct hdmi_acr_n { + unsigned int clock; + unsigned int n[3]; +}; + +/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */ +static const struct hdmi_acr_n hdmi_rec_n_table[] = { + /* Clock, N: 32kHz 44.1kHz 48kHz */ + { 25175, { 4576, 7007, 6864 } }, + { 74176, { 11648, 17836, 11648 } }, + { 148352, { 11648, 8918, 5824 } }, + { 296703, { 5824, 4459, 5824 } }, + { 297000, { 3072, 4704, 5120 } }, + { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */ +}; + +/** + * hdmi_recommended_n() - Return N value recommended by HDMI specification + * @freq: audio sample rate in Hz + * @clock: rounded TMDS clock in kHz + */ +static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock) +{ + const struct hdmi_acr_n *recommended; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) { + if (clock == hdmi_rec_n_table[i].clock) + break; + } + recommended = hdmi_rec_n_table + i; + + switch (freq) { + case 32000: + return recommended->n[0]; + case 44100: + return recommended->n[1]; + case 48000: + return recommended->n[2]; + case 88200: + return recommended->n[1] * 2; + case 96000: + return recommended->n[2] * 2; + case 176400: + return recommended->n[1] * 4; + case 192000: + return recommended->n[2] * 4; + default: + return (128 * freq) / 1000; + } +} + +static unsigned int hdmi_mode_clock_to_hz(unsigned int clock) +{ + switch (clock) { + case 25175: + return 25174825; /* 25.2/1.001 MHz */ + case 74176: + return 74175824; /* 74.25/1.001 MHz */ + case 148352: + return 148351648; /* 148.5/1.001 MHz */ + case 296703: + return 296703297; /* 297/1.001 MHz */ + default: + return clock * 1000; + } +} + +static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate, + unsigned int tmds_clock, unsigned int n) +{ + return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n, + 128 * audio_sample_rate); +} + +void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock, + unsigned int *n, unsigned int *cts) +{ + *n = hdmi_recommended_n(sample_rate, clock); + *cts = hdmi_expected_cts(sample_rate, clock, *n); +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_get_ncts, "DRM_MTK_HDMI"); + +int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct hdmi_audio_param aud_params = { 0 }; + unsigned int chan = params->cea.channels; + + dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__, + params->sample_rate, params->sample_width, chan); + + if (!hdmi->bridge.encoder) + return -ENODEV; + + switch (chan) { + case 2: + aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0; + break; + case 4: + aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0; + break; + case 6: + aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1; + break; + case 8: + aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1; + break; + default: + dev_err(hdmi->dev, "channel[%d] not supported!\n", chan); + return -EINVAL; + } + + switch (params->sample_rate) { + case 32000: + case 44100: + case 48000: + case 88200: + case 96000: + case 176400: + case 192000: + break; + default: + dev_err(hdmi->dev, "rate[%d] 
not supported!\n", + params->sample_rate); + return -EINVAL; + } + + switch (daifmt->fmt) { + case HDMI_I2S: + aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; + aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_params.aud_input_type = HDMI_AUD_INPUT_I2S; + aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; + aud_params.aud_mclk = HDMI_AUD_MCLK_128FS; + break; + case HDMI_SPDIF: + aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; + aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF; + break; + default: + dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__, + daifmt->fmt); + return -EINVAL; + } + memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params)); + memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params)); + + dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n", + aud_params.aud_codec, aud_params.aud_input_type, + aud_params.aud_input_chan_type, aud_params.codec_params.sample_rate); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_params, "DRM_MTK_HDMI"); + +int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + if (hdmi->enabled) + memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len)); + else + memset(buf, 0, len); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_get_eld, "DRM_MTK_HDMI"); + +void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + mutex_lock(&hdmi->update_plugged_status_lock); + hdmi->plugged_cb = fn; + hdmi->codec_dev = codec_dev; + mutex_unlock(&hdmi->update_plugged_status_lock); +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_set_plugged_cb, "DRM_MTK_HDMI"); + +static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi, struct device_node *np, + const char * const *clock_names, size_t num_clocks) +{ + int i; + + for (i = 0; i < num_clocks; i++) { + hdmi->clk[i] = of_clk_get_by_name(np, clock_names[i]); + + if (IS_ERR(hdmi->clk[i])) + return PTR_ERR(hdmi->clk[i]); + } + + return 0; +} + +bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_bridge_mode_fixup, "DRM_MTK_HDMI"); + +void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n", + adjusted_mode->name, adjusted_mode->hdisplay); + dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d", + adjusted_mode->hsync_start, adjusted_mode->hsync_end, + adjusted_mode->htotal); + dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n", + adjusted_mode->hskew, adjusted_mode->vdisplay); + dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d", + adjusted_mode->vsync_start, adjusted_mode->vsync_end, + adjusted_mode->vtotal); + dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n", + adjusted_mode->vscan, adjusted_mode->flags); + + drm_mode_copy(&hdmi->mode, adjusted_mode); +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_bridge_mode_set, "DRM_MTK_HDMI"); + +static void mtk_hdmi_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + +static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np) +{ + struct platform_device *cec_pdev; + struct device_node *cec_np; + int ret; + + /* The CEC module 
handles HDMI hotplug detection */ + cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec"); + if (!cec_np) + return dev_err_probe(dev, -EOPNOTSUPP, "Failed to find CEC node\n"); + + cec_pdev = of_find_device_by_node(cec_np); + if (!cec_pdev) { + dev_err(hdmi->dev, "Waiting for CEC device %pOF\n", cec_np); + of_node_put(cec_np); + return -EPROBE_DEFER; + } + of_node_put(cec_np); + + ret = devm_add_action_or_reset(dev, mtk_hdmi_put_device, &cec_pdev->dev); + if (ret) + return ret; + + /* + * The mediatek,syscon-hdmi property contains a phandle link to the + * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG + * registers it contains. + */ + hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi", + 1, &hdmi->sys_offset); + if (IS_ERR(hdmi->sys_regmap)) + return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap), + "Failed to get system configuration registers\n"); + + hdmi->cec_dev = &cec_pdev->dev; + return 0; +} + +static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, struct platform_device *pdev, + const char * const *clk_names, size_t num_clocks) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *remote, *i2c_np; + int ret; + + ret = mtk_hdmi_get_all_clk(hdmi, np, clk_names, num_clocks); + if (ret) + return dev_err_probe(dev, ret, "Failed to get clocks\n"); + + hdmi->irq = platform_get_irq(pdev, 0); + if (!hdmi->irq) + return hdmi->irq; + + hdmi->regs = device_node_to_regmap(dev->of_node); + if (IS_ERR(hdmi->regs)) + return PTR_ERR(hdmi->regs); + + remote = of_graph_get_remote_node(np, 1, 0); + if (!remote) + return -EINVAL; + + if (!of_device_is_compatible(remote, "hdmi-connector")) { + hdmi->next_bridge = of_drm_find_bridge(remote); + if (!hdmi->next_bridge) { + dev_err(dev, "Waiting for external bridge\n"); + of_node_put(remote); + return -EPROBE_DEFER; + } + } + + i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0); + of_node_put(remote); + if (!i2c_np) + return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n"); + + hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); + of_node_put(i2c_np); + if (!hdmi->ddc_adpt) + return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get ddc i2c adapter by node\n"); + + ret = devm_add_action_or_reset(dev, mtk_hdmi_put_device, &hdmi->ddc_adpt->dev); + if (ret) + return ret; + + ret = mtk_hdmi_get_cec_dev(hdmi, dev, np); + if (ret == -EOPNOTSUPP) + dev_info(dev, "CEC support unavailable: node not found\n"); + else if (ret) + return ret; + + return 0; +} + +static void mtk_hdmi_unregister_audio_driver(void *data) +{ + platform_device_unregister(data); +} + +static int mtk_hdmi_register_audio_driver(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + struct hdmi_audio_param *aud_param = &hdmi->aud_param; + struct hdmi_codec_pdata codec_data = { + .ops = hdmi->conf->ver_conf->codec_ops, + .max_i2s_channels = 2, + .i2s = 1, + .data = hdmi, + .no_capture_mute = 1, + }; + int ret; + + aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; + aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_param->aud_input_type = HDMI_AUD_INPUT_I2S; + aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; + aud_param->aud_mclk = HDMI_AUD_MCLK_128FS; + aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0; + + hdmi->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + ret = 
devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver, + hdmi->audio_pdev); + if (ret) + return ret; + + return 0; +} + +struct mtk_hdmi *mtk_hdmi_common_probe(struct platform_device *pdev) +{ + const struct mtk_hdmi_ver_conf *ver_conf; + const struct mtk_hdmi_conf *hdmi_conf; + struct device *dev = &pdev->dev; + struct mtk_hdmi *hdmi; + int ret; + + hdmi_conf = of_device_get_match_data(dev); + if (!hdmi_conf) + return ERR_PTR(-ENODEV); + + ver_conf = hdmi_conf->ver_conf; + + hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge, + ver_conf->bridge_funcs); + if (IS_ERR(hdmi)) + return hdmi; + + hdmi->dev = dev; + hdmi->conf = hdmi_conf; + + hdmi->clk = devm_kcalloc(dev, ver_conf->num_clocks, sizeof(*hdmi->clk), GFP_KERNEL); + if (!hdmi->clk) + return ERR_PTR(-ENOMEM); + + ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev, ver_conf->mtk_hdmi_clock_names, + ver_conf->num_clocks); + if (ret) + return ERR_PTR(ret); + + hdmi->phy = devm_phy_get(dev, "hdmi"); + if (IS_ERR(hdmi->phy)) + return dev_err_cast_probe(dev, hdmi->phy, "Failed to get HDMI PHY\n"); + + mutex_init(&hdmi->update_plugged_status_lock); + platform_set_drvdata(pdev, hdmi); + + ret = mtk_hdmi_register_audio_driver(dev); + if (ret) + return dev_err_ptr_probe(dev, ret, "Cannot register HDMI Audio driver\n"); + + hdmi->bridge.of_node = pdev->dev.of_node; + hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID + | DRM_BRIDGE_OP_HPD; + + if (ver_conf->bridge_funcs->hdmi_write_infoframe && + ver_conf->bridge_funcs->hdmi_clear_infoframe) + hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI; + + hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + hdmi->bridge.ddc = hdmi->ddc_adpt; + hdmi->bridge.vendor = "MediaTek"; + hdmi->bridge.product = "On-Chip HDMI"; + hdmi->bridge.interlace_allowed = ver_conf->interlace_allowed; + + ret = devm_drm_bridge_add(dev, &hdmi->bridge); + if (ret) + return dev_err_ptr_probe(dev, ret, "Failed to add bridge\n"); + + return hdmi; +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_common_probe, "DRM_MTK_HDMI"); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); +MODULE_DESCRIPTION("MediaTek HDMI Common Library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_common.h b/drivers/gpu/drm/mediatek/mtk_hdmi_common.h new file mode 100644 index 000000000000..de5e064585f8 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_common.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. + * Copyright (c) 2024 Collabora Ltd. 
+ */ + +#ifndef _MTK_HDMI_COMMON_H +#define _MTK_HDMI_COMMON_H + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_print.h> + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/mfd/syscon.h> +#include <linux/mutex.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +#include <sound/hdmi-codec.h> + +enum hdmi_aud_input_type { + HDMI_AUD_INPUT_I2S = 0, + HDMI_AUD_INPUT_SPDIF, +}; + +enum hdmi_aud_i2s_fmt { + HDMI_I2S_MODE_RJT_24BIT = 0, + HDMI_I2S_MODE_RJT_16BIT, + HDMI_I2S_MODE_LJT_24BIT, + HDMI_I2S_MODE_LJT_16BIT, + HDMI_I2S_MODE_I2S_24BIT, + HDMI_I2S_MODE_I2S_16BIT +}; + +enum hdmi_aud_mclk { + HDMI_AUD_MCLK_128FS, + HDMI_AUD_MCLK_192FS, + HDMI_AUD_MCLK_256FS, + HDMI_AUD_MCLK_384FS, + HDMI_AUD_MCLK_512FS, + HDMI_AUD_MCLK_768FS, + HDMI_AUD_MCLK_1152FS, +}; + +enum hdmi_aud_channel_type { + HDMI_AUD_CHAN_TYPE_1_0 = 0, + HDMI_AUD_CHAN_TYPE_1_1, + HDMI_AUD_CHAN_TYPE_2_0, + HDMI_AUD_CHAN_TYPE_2_1, + HDMI_AUD_CHAN_TYPE_3_0, + HDMI_AUD_CHAN_TYPE_3_1, + HDMI_AUD_CHAN_TYPE_4_0, + HDMI_AUD_CHAN_TYPE_4_1, + HDMI_AUD_CHAN_TYPE_5_0, + HDMI_AUD_CHAN_TYPE_5_1, + HDMI_AUD_CHAN_TYPE_6_0, + HDMI_AUD_CHAN_TYPE_6_1, + HDMI_AUD_CHAN_TYPE_7_0, + HDMI_AUD_CHAN_TYPE_7_1, + HDMI_AUD_CHAN_TYPE_3_0_LRS, + HDMI_AUD_CHAN_TYPE_3_1_LRS, + HDMI_AUD_CHAN_TYPE_4_0_CLRS, + HDMI_AUD_CHAN_TYPE_4_1_CLRS, + HDMI_AUD_CHAN_TYPE_6_1_CS, + HDMI_AUD_CHAN_TYPE_6_1_CH, + HDMI_AUD_CHAN_TYPE_6_1_OH, + HDMI_AUD_CHAN_TYPE_6_1_CHR, + HDMI_AUD_CHAN_TYPE_7_1_LH_RH, + HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR, + HDMI_AUD_CHAN_TYPE_7_1_LC_RC, + HDMI_AUD_CHAN_TYPE_7_1_LW_RW, + HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD, + HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS, + HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS, + HDMI_AUD_CHAN_TYPE_7_1_CS_CH, + HDMI_AUD_CHAN_TYPE_7_1_CS_OH, + HDMI_AUD_CHAN_TYPE_7_1_CS_CHR, + HDMI_AUD_CHAN_TYPE_7_1_CH_OH, + HDMI_AUD_CHAN_TYPE_7_1_CH_CHR, + HDMI_AUD_CHAN_TYPE_7_1_OH_CHR, + HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR, + HDMI_AUD_CHAN_TYPE_6_0_CS, + HDMI_AUD_CHAN_TYPE_6_0_CH, + HDMI_AUD_CHAN_TYPE_6_0_OH, + HDMI_AUD_CHAN_TYPE_6_0_CHR, + HDMI_AUD_CHAN_TYPE_7_0_LH_RH, + HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR, + HDMI_AUD_CHAN_TYPE_7_0_LC_RC, + HDMI_AUD_CHAN_TYPE_7_0_LW_RW, + HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD, + HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS, + HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS, + HDMI_AUD_CHAN_TYPE_7_0_CS_CH, + HDMI_AUD_CHAN_TYPE_7_0_CS_OH, + HDMI_AUD_CHAN_TYPE_7_0_CS_CHR, + HDMI_AUD_CHAN_TYPE_7_0_CH_OH, + HDMI_AUD_CHAN_TYPE_7_0_CH_CHR, + HDMI_AUD_CHAN_TYPE_7_0_OH_CHR, + HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR, + HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS, + HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF +}; + +enum hdmi_aud_channel_swap_type { + HDMI_AUD_SWAP_LR, + HDMI_AUD_SWAP_LFE_CC, + HDMI_AUD_SWAP_LSRS, + HDMI_AUD_SWAP_RLS_RRS, + HDMI_AUD_SWAP_LR_STATUS, +}; + +struct hdmi_audio_param { + enum hdmi_audio_coding_type aud_codec; + enum hdmi_audio_sample_size aud_sample_size; + enum hdmi_aud_input_type aud_input_type; + enum hdmi_aud_i2s_fmt aud_i2s_fmt; + enum hdmi_aud_mclk aud_mclk; + enum hdmi_aud_channel_type aud_input_chan_type; + struct hdmi_codec_params codec_params; +}; + +enum hdmi_hpd_state { + HDMI_PLUG_OUT = 0, + HDMI_PLUG_IN_AND_SINK_POWER_ON, + HDMI_PLUG_IN_ONLY, +}; + +struct mtk_hdmi_ver_conf { + const struct drm_bridge_funcs *bridge_funcs; + const struct hdmi_codec_ops *codec_ops; + const char * const *mtk_hdmi_clock_names; + int num_clocks; + bool interlace_allowed; +}; + +struct 
mtk_hdmi_conf { + const struct mtk_hdmi_ver_conf *ver_conf; + bool tz_disabled; + bool cea_modes_only; + unsigned long max_mode_clock; + u32 reg_hdmi_tx_cfg; +}; + +struct mtk_hdmi { + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector *curr_conn;/* current connector (only valid when 'enabled') */ + struct device *dev; + const struct mtk_hdmi_conf *conf; + struct phy *phy; + struct device *cec_dev; + struct i2c_adapter *ddc_adpt; + struct clk **clk; + struct drm_display_mode mode; + bool dvi_mode; + struct regmap *sys_regmap; + unsigned int sys_offset; + struct regmap *regs; + struct platform_device *audio_pdev; + struct hdmi_audio_param aud_param; + bool audio_enable; + bool powered; + bool enabled; + unsigned int irq; + enum hdmi_hpd_state hpd; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct mutex update_plugged_status_lock; +}; + +static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b) +{ + return container_of(b, struct mtk_hdmi, bridge); +} + + +int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len); +void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn, + struct device *codec_dev); +int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi, struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params); +void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock, + unsigned int *n, unsigned int *cts); +bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode); +struct mtk_hdmi *mtk_hdmi_common_probe(struct platform_device *pdev); +#endif /* _MTK_HDMI_COMMON_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c new file mode 100644 index 000000000000..b844e2c10f28 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek HDMI v2 Display Data Channel Driver + * + * Copyright (c) 2021 MediaTek Inc. + * Copyright (c) 2021 BayLibre, SAS + * Copyright (c) 2024 Collabora Ltd. 
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/types.h> + +#include <drm/drm_edid.h> + +#include "mtk_hdmi_common.h" +#include "mtk_hdmi_regs_v2.h" + +#define DDC2_DLY_CNT 572 /* BIM=208M/(v*4) = 90Khz */ +#define DDC2_DLY_CNT_EDID 832 /* BIM=208M/(v*4) = 62.5Khz */ +#define SI2C_ADDR_READ 0xf4 +#define SCDC_I2C_SLAVE_ADDRESS 0x54 + +struct mtk_hdmi_ddc { + struct device *dev; + struct regmap *regs; + struct clk *clk; + struct i2c_adapter adap; +}; + +static int mtk_ddc_check_and_rise_low_bus(struct mtk_hdmi_ddc *ddc) +{ + u32 val; + + regmap_read(ddc->regs, HDCP2X_DDCM_STATUS, &val); + if (val & DDC_I2C_BUS_LOW) { + regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_CLOCK_SCL)); + usleep_range(250, 300); + } + + if (val & DDC_I2C_NO_ACK) { + u32 ddc_ctrl, hpd_ddc_ctrl, hpd_ddc_status; + + regmap_read(ddc->regs, DDC_CTRL, &ddc_ctrl); + regmap_read(ddc->regs, HPD_DDC_CTRL, &hpd_ddc_ctrl); + regmap_read(ddc->regs, HPD_DDC_STATUS, &hpd_ddc_status); + } + + if (val & DDC_I2C_NO_ACK) + return -EIO; + + return 0; +} + +static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id, + u16 offset_id, u8 *wr_data) +{ + u32 val; + int ret; + + /* If down, rise bus for write operation */ + mtk_ddc_check_and_rise_low_bus(ddc); + + regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT, + FIELD_PREP(HPD_DDC_DELAY_CNT, DDC2_DLY_CNT)); + + if (wr_data) { + regmap_write(ddc->regs, SI2C_CTRL, + FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | + FIELD_PREP(SI2C_WDATA, *wr_data) | + SI2C_WR); + } + + regmap_write(ddc->regs, DDC_CTRL, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_WRITE) | + FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 
0 : 1) | + FIELD_PREP(DDC_CTRL_OFFSET, offset_id) | + FIELD_PREP(DDC_CTRL_ADDR, addr_id)); + usleep_range(1000, 1250); + + ret = regmap_read_poll_timeout(ddc->regs, HPD_DDC_STATUS, val, + !(val & DDC_I2C_IN_PROG), 500, 1000); + if (ret) { + dev_err(ddc->dev, "DDC I2C write timeout\n"); + return ret; + } + + /* The I2C bus might be down after WR operation: rise it again */ + ret = mtk_ddc_check_and_rise_low_bus(ddc); + if (ret) { + dev_err(ddc->dev, "Error during write operation: No ACK\n"); + return ret; + } + + return 0; +} + +static int mtk_ddcm_read_hdmi(struct mtk_hdmi_ddc *ddc, u16 uc_dev, + u8 addr, u8 *puc_value, u16 data_cnt) +{ + u16 dly_cnt, i, uc_idx; + u32 rem, temp_length, uc_read_count, val; + u64 loop_counter; + int ret; + + mtk_ddc_check_and_rise_low_bus(ddc); + + regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_CLEAR_FIFO)); + + if (data_cnt >= 16) { + temp_length = 16; + loop_counter = data_cnt; + + rem = do_div(loop_counter, temp_length); + if (rem) + loop_counter++; + } else { + temp_length = data_cnt; + loop_counter = 1; + } + + if (uc_dev >= DDC_ADDR) + dly_cnt = DDC2_DLY_CNT_EDID; + else + dly_cnt = DDC2_DLY_CNT; + + regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT, + FIELD_PREP(HPD_DDC_DELAY_CNT, dly_cnt)); + + for (i = 0; i < loop_counter; i++) { + rem = data_cnt % 16; + + if (i > 0 && i == (loop_counter - 1) && rem) + temp_length = rem; + + /* 0x51 - 0x53: Flow control */ + if (uc_dev > DDC_ADDR && uc_dev <= 0x53) { + regmap_update_bits(ddc->regs, SCDC_CTRL, SCDC_DDC_SEGMENT, + FIELD_PREP(SCDC_DDC_SEGMENT, uc_dev - DDC_ADDR)); + + regmap_write(ddc->regs, DDC_CTRL, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ENH_READ_NOACK) | + FIELD_PREP(DDC_CTRL_DIN_CNT, temp_length) | + FIELD_PREP(DDC_CTRL_OFFSET, addr + i * temp_length) | + FIELD_PREP(DDC_CTRL_ADDR, DDC_ADDR)); + } else { + u16 offset; + + if (addr != 0x43) + offset = i * 16; + else + offset = 0; + + regmap_write(ddc->regs, DDC_CTRL, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_READ_NOACK) | + FIELD_PREP(DDC_CTRL_DIN_CNT, temp_length) | + FIELD_PREP(DDC_CTRL_OFFSET, addr + offset) | + FIELD_PREP(DDC_CTRL_ADDR, uc_dev)); + } + usleep_range(5000, 5500); + + ret = regmap_read_poll_timeout(ddc->regs, HPD_DDC_STATUS, val, + !(val & DDC_I2C_IN_PROG), 1000, + 500 * (temp_length + 5)); + if (ret) { + dev_err(ddc->dev, "Timeout waiting for DDC I2C\n"); + return ret; + } + + ret = mtk_ddc_check_and_rise_low_bus(ddc); + if (ret) { + dev_err(ddc->dev, "Error during read operation: No ACK\n"); + return ret; + } + + for (uc_idx = 0; uc_idx < temp_length; uc_idx++) { + unsigned int read_idx = i * 16 + uc_idx; + + regmap_write(ddc->regs, SI2C_CTRL, + FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | + SI2C_RD); + + regmap_read(ddc->regs, HPD_DDC_STATUS, &val); + puc_value[read_idx] = FIELD_GET(DDC_DATA_OUT, val); + + regmap_write(ddc->regs, SI2C_CTRL, + FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | + SI2C_CONFIRM_READ); + + /* + * If HDMI IP gets reset during EDID read, DDC read + * operation will fail and its delay counter will be + * reset to 400. 
+ */ + regmap_read(ddc->regs, HPD_DDC_CTRL, &val); + if (FIELD_GET(HPD_DDC_DELAY_CNT, val) < DDC2_DLY_CNT) + return 0; + + uc_read_count = read_idx + 1; + } + } + if (uc_read_count > U8_MAX) + dev_warn(ddc->dev, "Invalid read data count %u\n", uc_read_count); + + return uc_read_count; +} + +static int mtk_hdmi_fg_ddc_data_read(struct mtk_hdmi_ddc *ddc, u16 b_dev, + u8 data_addr, u16 data_cnt, u8 *pr_data) +{ + int read_data_cnt; + u16 req_data_cnt; + + if (!data_cnt) { + dev_err(ddc->dev, "Invalid DDCM read request\n"); + return -EINVAL; + } + + req_data_cnt = U8_MAX - data_addr + 1; + if (req_data_cnt > data_cnt) + req_data_cnt = data_cnt; + + regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN); + + read_data_cnt = mtk_ddcm_read_hdmi(ddc, b_dev, data_addr, pr_data, req_data_cnt); + + if (read_data_cnt < 0) + return read_data_cnt; + else if (read_data_cnt != req_data_cnt) + return -EINVAL; + + return 0; +} + +static int mtk_hdmi_ddc_fg_data_write(struct mtk_hdmi_ddc *ddc, u16 b_dev, + u8 data_addr, u16 data_cnt, u8 *pr_data) +{ + int i, ret; + + regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN); + /* + * In case there is no payload data, just do a single write for the + * address only + */ + if (data_cnt == 0) + return mtk_ddc_wr_one(ddc, b_dev, data_addr, NULL); + + i = 0; + do { + ret = mtk_ddc_wr_one(ddc, b_dev, data_addr + i, pr_data + i); + if (ret) + return ret; + } while (++i < data_cnt); + + return 0; +} + +static int mtk_hdmi_ddc_v2_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) +{ + struct mtk_hdmi_ddc *ddc; + u8 offset = 0; + int i, ret; + + ddc = adapter->algo_data; + + for (i = 0; i < num; i++) { + struct i2c_msg *msg = &msgs[i]; + + if (!msg->buf) { + dev_err(ddc->dev, "No message buffer\n"); + return -EINVAL; + } + + if (msg->flags & I2C_M_RD) { + /* + * The underlying DDC hardware always issues a write request + * that assigns the read offset as part of the read operation, + * therefore, use the `offset` value assigned in the previous + * write request from drm_edid + */ + ret = mtk_hdmi_fg_ddc_data_read(ddc, msg->addr, offset, + msg->len, &msg->buf[0]); + if (ret) + return ret; + } else { + /* + * The HW needs the data offset, found in buf[0], in the + * DDC_CTRL register, and each byte of data, starting at + * buf[1], goes in the SI2C_WDATA register. + */ + ret = mtk_hdmi_ddc_fg_data_write(ddc, msg->addr, msg->buf[0], + msg->len - 1, &msg->buf[1]); + if (ret) + return ret; + + /* + * Store the offset value requested by drm_edid or by + * scdc to use in subsequent read requests. + */ + if ((msg->addr == DDC_ADDR || msg->addr == SCDC_I2C_SLAVE_ADDRESS) && + msg->len == 1) { + offset = msg->buf[0]; + } + } + } + + return i; +} + +static u32 mtk_hdmi_ddc_v2_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm mtk_hdmi_ddc_v2_algorithm = { + .master_xfer = mtk_hdmi_ddc_v2_xfer, + .functionality = mtk_hdmi_ddc_v2_func, +}; + +static int mtk_hdmi_ddc_v2_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_hdmi_ddc *ddc; + int ret; + + ddc = devm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL); + if (!ddc) + return -ENOMEM; + + ddc->dev = dev; + ddc->regs = device_node_to_regmap(dev->parent->of_node); + if (IS_ERR_OR_NULL(ddc->regs)) + return dev_err_probe(dev, + IS_ERR(ddc->regs) ? 
PTR_ERR(ddc->regs) : -EINVAL, + "Cannot get regmap\n"); + + ddc->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(ddc->clk)) + return dev_err_probe(dev, PTR_ERR(ddc->clk), "Cannot get DDC clock\n"); + + strscpy(ddc->adap.name, "mediatek-hdmi-ddc-v2", sizeof(ddc->adap.name)); + ddc->adap.owner = THIS_MODULE; + ddc->adap.algo = &mtk_hdmi_ddc_v2_algorithm; + ddc->adap.retries = 3; + ddc->adap.dev.of_node = dev->of_node; + ddc->adap.algo_data = ddc; + ddc->adap.dev.parent = &pdev->dev; + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Cannot enable Runtime PM\n"); + + pm_runtime_get_sync(dev); + + ret = devm_i2c_add_adapter(dev, &ddc->adap); + if (ret < 0) + return dev_err_probe(dev, ret, "Cannot add DDC I2C adapter\n"); + + platform_set_drvdata(pdev, ddc); + return 0; +} + +static const struct of_device_id mtk_hdmi_ddc_v2_match[] = { + { .compatible = "mediatek,mt8195-hdmi-ddc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_v2_match); + +struct platform_driver mtk_hdmi_ddc_v2_driver = { + .probe = mtk_hdmi_ddc_v2_probe, + .driver = { + .name = "mediatek-hdmi-ddc-v2", + .of_match_table = mtk_hdmi_ddc_v2_match, + }, +}; +module_platform_driver(mtk_hdmi_ddc_v2_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); +MODULE_AUTHOR("Can Zeng <can.zeng@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek HDMIv2 DDC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h new file mode 100644 index 000000000000..521b35c7e14d --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. + * Copyright (c) 2021 BayLibre, SAS + * Copyright (c) 2024 Collabora Ltd. 
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#ifndef _MTK_HDMI_REGS_H +#define _MTK_HDMI_REGS_H + +/* HDMI_TOP Config */ +#define TOP_CFG00 0x000 +#define HDMI2_ON BIT(2) +#define HDMI_MODE_HDMI BIT(3) +#define SCR_ON BIT(4) +#define TMDS_PACK_MODE GENMASK(9, 8) +#define TMDS_PACK_MODE_8BPP 0 +#define TMDS_PACK_MODE_10BPP 1 +#define TMDS_PACK_MODE_12BPP 2 +#define TMDS_PACK_MODE_16BPP 3 +#define DEEPCOLOR_PKT_EN BIT(12) +#define HDMI_ABIST_VIDEO_FORMAT GENMASK(21, 16) +#define HDMI_ABIST_ENABLE BIT(31) +#define TOP_CFG01 0x004 +#define CP_SET_MUTE_EN BIT(0) +#define CP_CLR_MUTE_EN BIT(1) +#define NULL_PKT_EN BIT(2) +#define NULL_PKT_VSYNC_HIGH_EN BIT(3) + +/* HDMI_TOP Audio: Channel Mapping */ +#define TOP_AUD_MAP 0x00c +#define SD0_MAP GENMASK(2, 0) +#define SD1_MAP GENMASK(6, 4) +#define SD2_MAP GENMASK(10, 8) +#define SD3_MAP GENMASK(14, 12) +#define SD4_MAP GENMASK(18, 16) +#define SD5_MAP GENMASK(22, 20) +#define SD6_MAP GENMASK(26, 24) +#define SD7_MAP GENMASK(30, 28) + +/* Auxiliary Video Information (AVI) Infoframe */ +#define TOP_AVI_HEADER 0x024 +#define TOP_AVI_PKT00 0x028 +#define TOP_AVI_PKT01 0x02C +#define TOP_AVI_PKT02 0x030 +#define TOP_AVI_PKT03 0x034 +#define TOP_AVI_PKT04 0x038 +#define TOP_AVI_PKT05 0x03C + +/* Audio Interface Infoframe */ +#define TOP_AIF_HEADER 0x040 +#define TOP_AIF_PKT00 0x044 +#define TOP_AIF_PKT01 0x048 +#define TOP_AIF_PKT02 0x04c +#define TOP_AIF_PKT03 0x050 + +/* Audio SPDIF Infoframe */ +#define TOP_SPDIF_HEADER 0x054 +#define TOP_SPDIF_PKT00 0x058 +#define TOP_SPDIF_PKT01 0x05c +#define TOP_SPDIF_PKT02 0x060 +#define TOP_SPDIF_PKT03 0x064 +#define TOP_SPDIF_PKT04 0x068 +#define TOP_SPDIF_PKT05 0x06c +#define TOP_SPDIF_PKT06 0x070 +#define TOP_SPDIF_PKT07 0x074 + +/* Infoframes Configuration */ +#define TOP_INFO_EN 0x01c +#define AVI_EN BIT(0) +#define SPD_EN BIT(1) +#define AUD_EN BIT(2) +#define CP_EN BIT(5) +#define VSIF_EN BIT(11) +#define AVI_EN_WR BIT(16) +#define SPD_EN_WR BIT(17) +#define AUD_EN_WR BIT(18) +#define CP_EN_WR BIT(21) +#define VSIF_EN_WR BIT(27) +#define TOP_INFO_RPT 0x020 +#define AVI_RPT_EN BIT(0) +#define SPD_RPT_EN BIT(1) +#define AUD_RPT_EN BIT(2) +#define CP_RPT_EN BIT(5) +#define VSIF_RPT_EN BIT(11) + +/* Vendor Specific Infoframe */ +#define TOP_VSIF_HEADER 0x174 +#define TOP_VSIF_PKT00 0x178 +#define TOP_VSIF_PKT01 0x17c +#define TOP_VSIF_PKT02 0x180 +#define TOP_VSIF_PKT03 0x184 +#define TOP_VSIF_PKT04 0x188 +#define TOP_VSIF_PKT05 0x18c +#define TOP_VSIF_PKT06 0x190 +#define TOP_VSIF_PKT07 0x194 + +/* HDMI_TOP Misc */ +#define TOP_MISC_CTLR 0x1a4 +#define DEEP_COLOR_ADD BIT(4) + +/* Hardware interrupts */ +#define TOP_INT_STA00 0x1a8 +#define TOP_INT_ENABLE00 0x1b0 +#define HTPLG_R_INT BIT(0) +#define HTPLG_F_INT BIT(1) +#define PORD_R_INT BIT(2) +#define PORD_F_INT BIT(3) +#define HDMI_VSYNC_INT BIT(4) +#define HDMI_AUDIO_INT BIT(5) +#define HDCP2X_RX_REAUTH_REQ_DDCM_INT BIT(25) +#define TOP_INT_ENABLE01 0x1b4 +#define TOP_INT_CLR00 0x1b8 +#define TOP_INT_CLR01 0x1bc + + +/* Video Mute */ +#define TOP_VMUTE_CFG1 0x1c8 +#define REG_VMUTE_EN BIT(16) + +/* HDMI Audio IP */ +#define AIP_CTRL 0x400 +#define CTS_SW_SEL BIT(0) +#define CTS_REQ_EN BIT(1) +#define MCLK_EN BIT(2) +#define NO_MCLK_CTSGEN_SEL BIT(3) +#define AUD_IN_EN BIT(8) +#define AUD_SEL_OWRT BIT(9) +#define SPDIF_EN BIT(13) +#define HBRA_ON BIT(14) +#define DSD_EN BIT(15) +#define I2S_EN GENMASK(19, 16) +#define HBR_FROM_SPDIF BIT(20) +#define CTS_CAL_N4 BIT(23) +#define SPDIF_INTERNAL_MODULE BIT(24) +#define 
AIP_N_VAL 0x404 +#define AIP_CTS_SVAL 0x408 +#define AIP_SPDIF_CTRL 0x40c +#define WR_1UI_LOCK BIT(0) +#define FS_OVERRIDE_WRITE BIT(1) +#define WR_2UI_LOCK BIT(2) +#define MAX_1UI_WRITE GENMASK(15, 8) +#define MAX_2UI_SPDIF_WRITE GENMASK(23, 16) +#define MAX_2UI_I2S_HI_WRITE GENMASK(23, 20) +#define MAX_2UI_I2S_LFE_CC_SWAP BIT(1) +#define MAX_2UI_I2S_LO_WRITE GENMASK(19, 16) +#define AUD_ERR_THRESH GENMASK(29, 24) +#define I2S2DSD_EN BIT(30) +#define AIP_I2S_CTRL 0x410 +#define FIFO0_MAP GENMASK(1, 0) +#define FIFO1_MAP GENMASK(3, 2) +#define FIFO2_MAP GENMASK(5, 4) +#define FIFO3_MAP GENMASK(7, 6) +#define I2S_1ST_BIT_NOSHIFT BIT(8) +#define I2S_DATA_DIR_LSB BIT(9) +#define JUSTIFY_RIGHT BIT(10) +#define WS_HIGH BIT(11) +#define VBIT_COMPRESSED BIT(12) +#define CBIT_ORDER_SAME BIT(13) +#define SCK_EDGE_RISE BIT(14) +#define AIP_I2S_CHST0 0x414 +#define AIP_I2S_CHST1 0x418 +#define AIP_TXCTRL 0x424 +#define RST4AUDIO BIT(0) +#define RST4AUDIO_FIFO BIT(1) +#define RST4AUDIO_ACR BIT(2) +#define AUD_LAYOUT_1 BIT(4) +#define AUD_MUTE_FIFO_EN BIT(5) +#define AUD_PACKET_DROP BIT(6) +#define DSD_MUTE_EN BIT(7) +#define AIP_TPI_CTRL 0x428 +#define TPI_AUDIO_LOOKUP_EN BIT(2) + +/* Video downsampling configuration */ +#define VID_DOWNSAMPLE_CONFIG 0x8d0 +#define C444_C422_CONFIG_ENABLE BIT(0) +#define C422_C420_CONFIG_ENABLE BIT(4) +#define C422_C420_CONFIG_BYPASS BIT(5) +#define C422_C420_CONFIG_OUT_CB_OR_CR BIT(6) +#define VID_OUT_FORMAT 0x8fc +#define OUTPUT_FORMAT_DEMUX_420_ENABLE BIT(10) + +/* HDCP registers */ +#define HDCP_TOP_CTRL 0xc00 +#define HDCP2X_CTRL_0 0xc20 +#define HDCP2X_EN BIT(0) +#define HDCP2X_ENCRYPT_EN BIT(7) +#define HDCP2X_HPD_OVR BIT(10) +#define HDCP2X_HPD_SW BIT(11) +#define HDCP2X_POL_CTRL 0xc54 +#define HDCP2X_DIS_POLL_EN BIT(16) +#define HDCP1X_CTRL 0xcd0 +#define HDCP1X_ENC_EN BIT(6) + +/* HDMI DDC registers */ +#define HPD_DDC_CTRL 0xc08 +#define HPD_DDC_DELAY_CNT GENMASK(31, 16) +#define HPD_DDC_HPD_DBNC_EN BIT(2) +#define HPD_DDC_PORD_DBNC_EN BIT(3) +#define DDC_CTRL 0xc10 +#define DDC_CTRL_ADDR GENMASK(7, 1) +#define DDC_CTRL_OFFSET GENMASK(15, 8) +#define DDC_CTRL_DIN_CNT GENMASK(25, 16) +#define DDC_CTRL_CMD GENMASK(31, 28) +#define SCDC_CTRL 0xc18 +#define SCDC_DDC_SEGMENT GENMASK(15, 8) +#define HPD_DDC_STATUS 0xc60 +#define HPD_STATE GENMASK(1, 0) +#define HPD_STATE_CONNECTED 2 +#define HPD_PIN_STA BIT(4) +#define PORD_PIN_STA BIT(5) +#define DDC_I2C_IN_PROG BIT(13) +#define DDC_DATA_OUT GENMASK(23, 16) +#define SI2C_CTRL 0xcac +#define SI2C_WR BIT(0) +#define SI2C_RD BIT(1) +#define SI2C_CONFIRM_READ BIT(2) +#define SI2C_WDATA GENMASK(15, 8) +#define SI2C_ADDR GENMASK(23, 16) + +/* HDCP DDC registers */ +#define HDCP2X_DDCM_STATUS 0xc68 +#define DDC_I2C_NO_ACK BIT(10) +#define DDC_I2C_BUS_LOW BIT(11) + +/* HDMI TX registers */ +#define HDMITX_CONFIG_MT8188 0xea0 +#define HDMITX_CONFIG_MT8195 0x900 +#define HDMI_YUV420_MODE BIT(10) +#define HDMITX_SW_HPD BIT(29) +#define HDMITX_SW_RSTB BIT(31) + +/** + * enum mtk_hdmi_ddc_v2_cmds - DDC_CMD register commands + * @DDC_CMD_READ_NOACK: Current address read with no ACK on last byte + * @DDC_CMD_READ: Current address read with ACK on last byte + * @DDC_CMD_SEQ_READ_NOACK: Sequential read with no ACK on last byte + * @DDC_CMD_SEQ_READ: Sequential read with ACK on last byte + * @DDC_CMD_ENH_READ_NOACK: Enhanced read with no ACK on last byte + * @DDC_CMD_ENH_READ: Enhanced read with ACK on last byte + * @DDC_CMD_SEQ_WRITE_NOACK: Sequential write ignoring ACK on last byte + * @DDC_CMD_SEQ_WRITE: Sequential write 
requiring ACK on last byte + * @DDC_CMD_RSVD: Reserved for future use + * @DDC_CMD_CLEAR_FIFO: Clear DDC I2C FIFO + * @DDC_CMD_CLOCK_SCL: Start clocking DDC I2C SCL + * @DDC_CMD_ABORT_XFER: Abort DDC I2C transaction + */ +enum mtk_hdmi_ddc_v2_cmds { + DDC_CMD_READ_NOACK = 0x0, + DDC_CMD_READ, + DDC_CMD_SEQ_READ_NOACK, + DDC_CMD_SEQ_READ, + DDC_CMD_ENH_READ_NOACK, + DDC_CMD_ENH_READ, + DDC_CMD_SEQ_WRITE_NOACK, + DDC_CMD_SEQ_WRITE = 0x07, + DDC_CMD_CLEAR_FIFO = 0x09, + DDC_CMD_CLOCK_SCL = 0x0a, + DDC_CMD_ABORT_XFER = 0x0f +}; + +#endif /* _MTK_HDMI_REGS_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c b/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c new file mode 100644 index 000000000000..c272e1e74b7d --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c @@ -0,0 +1,1521 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek HDMI v2 IP driver + * + * Copyright (c) 2022 MediaTek Inc. + * Copyright (c) 2022 BayLibre, SAS + * Copyright (c) 2024 Collabora Ltd. + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/suspend.h> +#include <linux/units.h> +#include <linux/phy/phy.h> + +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> +#include <drm/display/drm_scdc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_edid.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include "mtk_hdmi_common.h" +#include "mtk_hdmi_regs_v2.h" + +#define MTK_HDMI_V2_CLOCK_MIN 27000 +#define MTK_HDMI_V2_CLOCK_MAX 594000 + +#define HPD_PORD_HWIRQS (HTPLG_R_INT | HTPLG_F_INT | PORD_F_INT | PORD_R_INT) + +enum mtk_hdmi_v2_clk_id { + MTK_HDMI_V2_CLK_HDCP_SEL, + MTK_HDMI_V2_CLK_HDCP_24M_SEL, + MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI, + MTK_HDMI_V2_CLK_HDMI_APB_SEL, + MTK_HDMI_V2_CLK_COUNT, +}; + +const char *const mtk_hdmi_v2_clk_names[MTK_HDMI_V2_CLK_COUNT] = { + [MTK_HDMI_V2_CLK_HDMI_APB_SEL] = "bus", + [MTK_HDMI_V2_CLK_HDCP_SEL] = "hdcp", + [MTK_HDMI_V2_CLK_HDCP_24M_SEL] = "hdcp24m", + [MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI] = "hdmi-split", +}; + +static inline void mtk_hdmi_v2_hwirq_disable(struct mtk_hdmi *hdmi) +{ + regmap_write(hdmi->regs, TOP_INT_ENABLE00, 0); + regmap_write(hdmi->regs, TOP_INT_ENABLE01, 0); +} + +static inline void mtk_hdmi_v2_enable_hpd_pord_irq(struct mtk_hdmi *hdmi, bool enable) +{ + if (enable) + regmap_set_bits(hdmi->regs, TOP_INT_ENABLE00, HPD_PORD_HWIRQS); + else + regmap_clear_bits(hdmi->regs, TOP_INT_ENABLE00, HPD_PORD_HWIRQS); +} + +static inline void mtk_hdmi_v2_set_sw_hpd(struct mtk_hdmi *hdmi, bool enable) +{ + if (enable) { + regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_HPD); + regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR); + regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW); + } else { + regmap_clear_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR); + regmap_clear_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW); + regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_HPD); + } +} + +static inline void mtk_hdmi_v2_enable_scrambling(struct mtk_hdmi *hdmi, bool enable) +{ + struct drm_scdc *scdc = &hdmi->curr_conn->display_info.hdmi.scdc; + + if (enable) + 
regmap_set_bits(hdmi->regs, TOP_CFG00, SCR_ON | HDMI2_ON); + else + regmap_clear_bits(hdmi->regs, TOP_CFG00, SCR_ON | HDMI2_ON); + + if (scdc->supported) { + if (scdc->scrambling.supported) + drm_scdc_set_scrambling(hdmi->curr_conn, enable); + drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, enable); + } +} + +static void mtk_hdmi_v2_hw_vid_mute(struct mtk_hdmi *hdmi, bool enable) +{ + /* If enabled, sends a black image */ + if (enable) + regmap_set_bits(hdmi->regs, TOP_VMUTE_CFG1, REG_VMUTE_EN); + else + regmap_clear_bits(hdmi->regs, TOP_VMUTE_CFG1, REG_VMUTE_EN); +} + +static void mtk_hdmi_v2_hw_aud_mute(struct mtk_hdmi *hdmi, bool enable) +{ + u32 aip, val; + + if (!enable) { + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_MUTE_FIFO_EN); + return; + } + + regmap_read(hdmi->regs, AIP_CTRL, &aip); + + val = AUD_MUTE_FIFO_EN; + if (aip & DSD_EN) + val |= DSD_MUTE_EN; + + regmap_update_bits(hdmi->regs, AIP_TXCTRL, val, val); +} + +static void mtk_hdmi_v2_hw_reset(struct mtk_hdmi *hdmi) +{ + regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_RSTB); + udelay(5); + regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_RSTB); +} + +static inline u32 mtk_hdmi_v2_format_hw_packet(const u8 *buffer, u8 len) +{ + unsigned short i; + u32 val = 0; + + for (i = 0; i < len; i++) + val |= buffer[i] << (i * 8); + + return val; +} + +static void mtk_hdmi_v2_hw_write_audio_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer) +{ + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AUD_EN | AUD_EN_WR); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN); + + regmap_write(hdmi->regs, TOP_AIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3)); + regmap_write(hdmi->regs, TOP_AIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 3)); + regmap_write(hdmi->regs, TOP_AIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 2)); + regmap_write(hdmi->regs, TOP_AIF_PKT02, 0); + regmap_write(hdmi->regs, TOP_AIF_PKT03, 0); + + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_EN, AUD_EN | AUD_EN_WR); +} + +static void mtk_hdmi_v2_hw_write_avi_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer) +{ + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN); + + regmap_write(hdmi->regs, TOP_AVI_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3)); + regmap_write(hdmi->regs, TOP_AVI_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4)); + regmap_write(hdmi->regs, TOP_AVI_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 3)); + regmap_write(hdmi->regs, TOP_AVI_PKT02, mtk_hdmi_v2_format_hw_packet(&buffer[10], 4)); + regmap_write(hdmi->regs, TOP_AVI_PKT03, mtk_hdmi_v2_format_hw_packet(&buffer[14], 3)); + regmap_write(hdmi->regs, TOP_AVI_PKT04, 0); + regmap_write(hdmi->regs, TOP_AVI_PKT05, 0); + + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN); +} + +static void mtk_hdmi_v2_hw_write_spd_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer) +{ + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN); + + regmap_write(hdmi->regs, TOP_SPDIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 3)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT02, mtk_hdmi_v2_format_hw_packet(&buffer[10], 4)); + 
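+ /* + * Note: mtk_hdmi_v2_format_hw_packet() packs up to four infoframe bytes, + * little-endian, into each 32-bit packet register, so the SPD checksum + * plus its 25-byte payload spans TOP_SPDIF_PKT00..PKT07. + */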
regmap_write(hdmi->regs, TOP_SPDIF_PKT03, mtk_hdmi_v2_format_hw_packet(&buffer[14], 3)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT04, mtk_hdmi_v2_format_hw_packet(&buffer[17], 4)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT05, mtk_hdmi_v2_format_hw_packet(&buffer[21], 3)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT06, mtk_hdmi_v2_format_hw_packet(&buffer[24], 4)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT07, buffer[28]); + + regmap_set_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN); +} + +static void mtk_hdmi_v2_hw_write_vendor_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer) +{ + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN); + + regmap_write(hdmi->regs, TOP_VSIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3)); + regmap_write(hdmi->regs, TOP_VSIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4)); + regmap_write(hdmi->regs, TOP_VSIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 2)); + regmap_write(hdmi->regs, TOP_VSIF_PKT02, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT03, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT04, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT05, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT06, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT07, 0); + + regmap_set_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN); +} + +static void mtk_hdmi_yuv420_downsampling(struct mtk_hdmi *hdmi, bool enable) +{ + u32 val; + + regmap_read(hdmi->regs, VID_DOWNSAMPLE_CONFIG, &val); + + if (enable) { + regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMI_YUV420_MODE); + + val |= C444_C422_CONFIG_ENABLE | C422_C420_CONFIG_ENABLE; + val |= C422_C420_CONFIG_OUT_CB_OR_CR; + val &= ~C422_C420_CONFIG_BYPASS; + regmap_write(hdmi->regs, VID_DOWNSAMPLE_CONFIG, val); + + regmap_set_bits(hdmi->regs, VID_OUT_FORMAT, OUTPUT_FORMAT_DEMUX_420_ENABLE); + } else { + regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMI_YUV420_MODE); + + val &= ~(C444_C422_CONFIG_ENABLE | C422_C420_CONFIG_ENABLE); + val &= ~C422_C420_CONFIG_OUT_CB_OR_CR; + val |= C422_C420_CONFIG_BYPASS; + regmap_write(hdmi->regs, VID_DOWNSAMPLE_CONFIG, val); + + regmap_clear_bits(hdmi->regs, VID_OUT_FORMAT, OUTPUT_FORMAT_DEMUX_420_ENABLE); + } +} + +static int mtk_hdmi_v2_setup_audio_infoframe(struct mtk_hdmi *hdmi) +{ + struct hdmi_codec_params *params = &hdmi->aud_param.codec_params; + struct hdmi_audio_infoframe frame; + u8 buffer[14]; + ssize_t ret; + + memcpy(&frame, ¶ms->cea, sizeof(frame)); + + ret = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (ret < 0) + return ret; + + mtk_hdmi_v2_hw_write_audio_infoframe(hdmi, buffer); + + return 0; +} + +static inline void mtk_hdmi_v2_hw_gcp_avmute(struct mtk_hdmi *hdmi, bool mute) +{ + u32 val; + + regmap_read(hdmi->regs, TOP_CFG01, &val); + val &= ~(CP_CLR_MUTE_EN | CP_SET_MUTE_EN); + + if (mute) { + val |= CP_SET_MUTE_EN; + val &= ~CP_CLR_MUTE_EN; + } else { + val |= CP_CLR_MUTE_EN; + val &= ~CP_SET_MUTE_EN; + } + regmap_write(hdmi->regs, TOP_CFG01, val); + + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, CP_RPT_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_EN, CP_EN | CP_EN_WR); +} + +static void mtk_hdmi_v2_hw_ncts_enable(struct mtk_hdmi *hdmi, bool enable) +{ + if (enable) + regmap_set_bits(hdmi->regs, AIP_CTRL, CTS_SW_SEL); + else + regmap_clear_bits(hdmi->regs, AIP_CTRL, CTS_SW_SEL); +} + +static void mtk_hdmi_v2_hw_aud_set_channel_status(struct mtk_hdmi *hdmi) 
+{ + u8 *ch_status = hdmi->aud_param.codec_params.iec.status; + + /* Only the first 5 to 7 bytes of Channel Status contain useful information */ + regmap_write(hdmi->regs, AIP_I2S_CHST0, mtk_hdmi_v2_format_hw_packet(&ch_status[0], 4)); + regmap_write(hdmi->regs, AIP_I2S_CHST1, mtk_hdmi_v2_format_hw_packet(&ch_status[4], 3)); +} + +static void mtk_hdmi_v2_hw_aud_set_ncts(struct mtk_hdmi *hdmi, + unsigned int sample_rate, + unsigned int clock) +{ + unsigned int n, cts; + + mtk_hdmi_get_ncts(sample_rate, clock, &n, &cts); + + regmap_write(hdmi->regs, AIP_N_VAL, n); + regmap_write(hdmi->regs, AIP_CTS_SVAL, cts); +} + +static void mtk_hdmi_v2_hw_aud_enable(struct mtk_hdmi *hdmi, bool enable) +{ + if (enable) + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_PACKET_DROP); + else + regmap_set_bits(hdmi->regs, AIP_TXCTRL, AUD_PACKET_DROP); +} + +static u32 mtk_hdmi_v2_aud_output_channel_map(u8 sd0, u8 sd1, u8 sd2, u8 sd3, + u8 sd4, u8 sd5, u8 sd6, u8 sd7) +{ + u32 val; + + /* + * Each of the Output Channels (0-7) can be mapped to get their input + * from any of the available Input Channels (0-7): this function + * takes input channel numbers and formats a value that must then + * be written to the TOP_AUD_MAP hardware register by the caller. + */ + val = FIELD_PREP(SD0_MAP, sd0) | FIELD_PREP(SD1_MAP, sd1); + val |= FIELD_PREP(SD2_MAP, sd2) | FIELD_PREP(SD3_MAP, sd3); + val |= FIELD_PREP(SD4_MAP, sd4) | FIELD_PREP(SD5_MAP, sd5); + val |= FIELD_PREP(SD6_MAP, sd6) | FIELD_PREP(SD7_MAP, sd7); + + return val; +} + +static void mtk_hdmi_audio_dsd_config(struct mtk_hdmi *hdmi, + unsigned char chnum, bool dsd_bypass) +{ + u32 channel_map; + + regmap_update_bits(hdmi->regs, AIP_CTRL, SPDIF_EN | DSD_EN | HBRA_ON, DSD_EN); + regmap_set_bits(hdmi->regs, AIP_TXCTRL, DSD_MUTE_EN); + + if (dsd_bypass) + channel_map = mtk_hdmi_v2_aud_output_channel_map(0, 2, 4, 6, 1, 3, 5, 7); + else + channel_map = mtk_hdmi_v2_aud_output_channel_map(0, 5, 1, 0, 3, 2, 4, 0); + + regmap_write(hdmi->regs, TOP_AUD_MAP, channel_map); + regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, I2S2DSD_EN); +} + +static inline void mtk_hdmi_v2_hw_i2s_fifo_map(struct mtk_hdmi *hdmi, u32 fifo_mapping) +{ + regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, + FIFO0_MAP | FIFO1_MAP | FIFO2_MAP | FIFO3_MAP, fifo_mapping); +} + +static inline void mtk_hdmi_v2_hw_i2s_ch_number(struct mtk_hdmi *hdmi, u8 chnum) +{ + regmap_update_bits(hdmi->regs, AIP_CTRL, I2S_EN, FIELD_PREP(I2S_EN, chnum)); +} + +static void mtk_hdmi_v2_hw_i2s_ch_mapping(struct mtk_hdmi *hdmi, u8 chnum, u8 mapping) +{ + u32 fifo_map; + u8 bdata; + + switch (chnum) { + default: + case 2: + bdata = 0x1; + break; + case 3: + bdata = 0x3; + break; + case 6: + if (mapping == 0x0e) { + bdata = 0xf; + break; + } + fallthrough; + case 5: + bdata = 0x7; + break; + case 7: + case 8: + bdata = 0xf; + break; + } + + /* Assign default FIFO mapping: SD0 to FIFO0, SD1 to FIFO1, etc. 
*/ + fifo_map = FIELD_PREP(FIFO0_MAP, 0) | FIELD_PREP(FIFO1_MAP, 1); + fifo_map |= FIELD_PREP(FIFO2_MAP, 2) | FIELD_PREP(FIFO3_MAP, 3); + mtk_hdmi_v2_hw_i2s_fifo_map(hdmi, fifo_map); + mtk_hdmi_v2_hw_i2s_ch_number(hdmi, bdata); + + /* + * Set HDMI Audio packet layout indicator: + * Layout 0 is for two channels + * Layout 1 is for up to eight channels + */ + if (chnum == 2) + regmap_set_bits(hdmi->regs, AIP_TXCTRL, AUD_LAYOUT_1); + else + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_LAYOUT_1); +} + +static void mtk_hdmi_i2s_data_fmt(struct mtk_hdmi *hdmi, unsigned char fmt) +{ + u32 val; + + regmap_read(hdmi->regs, AIP_I2S_CTRL, &val); + val &= ~(WS_HIGH | I2S_1ST_BIT_NOSHIFT | JUSTIFY_RIGHT); + + switch (fmt) { + case HDMI_I2S_MODE_RJT_24BIT: + case HDMI_I2S_MODE_RJT_16BIT: + val |= (WS_HIGH | I2S_1ST_BIT_NOSHIFT | JUSTIFY_RIGHT); + break; + case HDMI_I2S_MODE_LJT_24BIT: + case HDMI_I2S_MODE_LJT_16BIT: + val |= (WS_HIGH | I2S_1ST_BIT_NOSHIFT); + break; + case HDMI_I2S_MODE_I2S_24BIT: + case HDMI_I2S_MODE_I2S_16BIT: + default: + break; + } + + regmap_write(hdmi->regs, AIP_I2S_CTRL, val); +} + +static inline void mtk_hdmi_i2s_sck_edge_rise(struct mtk_hdmi *hdmi, bool rise) +{ + if (rise) + regmap_set_bits(hdmi->regs, AIP_I2S_CTRL, SCK_EDGE_RISE); + else + regmap_clear_bits(hdmi->regs, AIP_I2S_CTRL, SCK_EDGE_RISE); +} + +static inline void mtk_hdmi_i2s_cbit_order(struct mtk_hdmi *hdmi, unsigned int cbit) +{ + regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, CBIT_ORDER_SAME, cbit); +} + +static inline void mtk_hdmi_i2s_vbit(struct mtk_hdmi *hdmi, unsigned int vbit) +{ + /* V bit: 0 for PCM, 1 for Compressed data */ + regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, VBIT_COMPRESSED, vbit); +} + +static inline void mtk_hdmi_i2s_data_direction(struct mtk_hdmi *hdmi, unsigned int is_lsb) +{ + regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, I2S_DATA_DIR_LSB, is_lsb); +} + +static inline void mtk_hdmi_v2_hw_audio_type(struct mtk_hdmi *hdmi, unsigned int spdif_i2s) +{ + regmap_update_bits(hdmi->regs, AIP_CTRL, SPDIF_EN, FIELD_PREP(SPDIF_EN, spdif_i2s)); +} + +static u8 mtk_hdmi_v2_get_i2s_ch_mapping(struct mtk_hdmi *hdmi, u8 channel_type) +{ + switch (channel_type) { + case HDMI_AUD_CHAN_TYPE_1_1: + case HDMI_AUD_CHAN_TYPE_2_1: + return 0x01; + case HDMI_AUD_CHAN_TYPE_3_0: + return 0x02; + case HDMI_AUD_CHAN_TYPE_3_1: + return 0x03; + case HDMI_AUD_CHAN_TYPE_3_0_LRS: + case HDMI_AUD_CHAN_TYPE_4_0: + return 0x08; + case HDMI_AUD_CHAN_TYPE_5_1: + return 0x0b; + case HDMI_AUD_CHAN_TYPE_4_1_CLRS: + case HDMI_AUD_CHAN_TYPE_6_0: + case HDMI_AUD_CHAN_TYPE_6_0_CS: + case HDMI_AUD_CHAN_TYPE_6_0_CH: + case HDMI_AUD_CHAN_TYPE_6_0_OH: + case HDMI_AUD_CHAN_TYPE_6_0_CHR: + return 0x0e; + case HDMI_AUD_CHAN_TYPE_1_0: + case HDMI_AUD_CHAN_TYPE_2_0: + case HDMI_AUD_CHAN_TYPE_3_1_LRS: + case HDMI_AUD_CHAN_TYPE_4_1: + case HDMI_AUD_CHAN_TYPE_5_0: + case HDMI_AUD_CHAN_TYPE_4_0_CLRS: + case HDMI_AUD_CHAN_TYPE_6_1: + case HDMI_AUD_CHAN_TYPE_6_1_CS: + case HDMI_AUD_CHAN_TYPE_6_1_CH: + case HDMI_AUD_CHAN_TYPE_6_1_OH: + case HDMI_AUD_CHAN_TYPE_6_1_CHR: + case HDMI_AUD_CHAN_TYPE_7_0: + case HDMI_AUD_CHAN_TYPE_7_0_LH_RH: + case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_7_0_LC_RC: + case HDMI_AUD_CHAN_TYPE_7_0_LW_RW: + case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD: + case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS: + case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS: + case HDMI_AUD_CHAN_TYPE_7_0_CS_CH: + case HDMI_AUD_CHAN_TYPE_7_0_CS_OH: + case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_CH_OH: + case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR: + case 
HDMI_AUD_CHAN_TYPE_7_0_OH_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS: + case HDMI_AUD_CHAN_TYPE_7_1: + case HDMI_AUD_CHAN_TYPE_7_1_LH_RH: + case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_7_1_LC_RC: + case HDMI_AUD_CHAN_TYPE_7_1_LW_RW: + case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD: + case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS: + case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS: + case HDMI_AUD_CHAN_TYPE_7_1_CS_CH: + case HDMI_AUD_CHAN_TYPE_7_1_CS_OH: + case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_CH_OH: + case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR: + default: + return 0; + } + + return 0; +} + +static inline void mtk_hdmi_v2_hw_i2s_ch_swap(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_I2S_HI_WRITE, + FIELD_PREP(MAX_2UI_I2S_HI_WRITE, MAX_2UI_I2S_LFE_CC_SWAP)); +} + +static void mtk_hdmi_hbr_config(struct mtk_hdmi *hdmi, bool dsd_bypass) +{ + const u32 hbr_mask = SPDIF_EN | DSD_EN | HBRA_ON; + + if (dsd_bypass) { + regmap_update_bits(hdmi->regs, AIP_CTRL, hbr_mask, HBRA_ON); + regmap_set_bits(hdmi->regs, AIP_CTRL, I2S_EN); + } else { + regmap_update_bits(hdmi->regs, AIP_CTRL, hbr_mask, SPDIF_EN); + regmap_set_bits(hdmi->regs, AIP_CTRL, SPDIF_INTERNAL_MODULE); + regmap_set_bits(hdmi->regs, AIP_CTRL, HBR_FROM_SPDIF); + regmap_set_bits(hdmi->regs, AIP_CTRL, CTS_CAL_N4); + } +} + +static inline void mtk_hdmi_v2_hw_spdif_config(struct mtk_hdmi *hdmi) +{ + regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, WR_1UI_LOCK); + regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, FS_OVERRIDE_WRITE); + regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, WR_2UI_LOCK); + + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_1UI_WRITE, + FIELD_PREP(MAX_1UI_WRITE, 4)); + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_SPDIF_WRITE, + FIELD_PREP(MAX_2UI_SPDIF_WRITE, 9)); + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, AUD_ERR_THRESH, + FIELD_PREP(AUD_ERR_THRESH, 4)); + + regmap_set_bits(hdmi->regs, AIP_SPDIF_CTRL, I2S2DSD_EN); +} + +static void mtk_hdmi_v2_aud_set_input(struct mtk_hdmi *hdmi) +{ + struct hdmi_audio_param *aud_param = &hdmi->aud_param; + struct hdmi_codec_params *codec_params = &aud_param->codec_params; + u8 i2s_ch_map; + u32 out_ch_map; + + /* Write the default output channel map. 
CH0 maps to SD0, CH1 maps to SD1, etc */ + out_ch_map = mtk_hdmi_v2_aud_output_channel_map(0, 1, 2, 3, 4, 5, 6, 7); + regmap_write(hdmi->regs, TOP_AUD_MAP, out_ch_map); + + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_I2S_HI_WRITE, 0); + regmap_clear_bits(hdmi->regs, AIP_CTRL, + SPDIF_EN | DSD_EN | HBRA_ON | CTS_CAL_N4 | + HBR_FROM_SPDIF | SPDIF_INTERNAL_MODULE); + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, DSD_MUTE_EN | AUD_LAYOUT_1); + + if (aud_param->aud_input_type == HDMI_AUD_INPUT_I2S) { + switch (aud_param->aud_codec) { + case HDMI_AUDIO_CODING_TYPE_DTS_HD: + case HDMI_AUDIO_CODING_TYPE_MLP: + mtk_hdmi_i2s_data_fmt(hdmi, aud_param->aud_i2s_fmt); + mtk_hdmi_hbr_config(hdmi, true); + break; + case HDMI_AUDIO_CODING_TYPE_DSD: + mtk_hdmi_audio_dsd_config(hdmi, codec_params->channels, 0); + mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, codec_params->channels, 1); + break; + default: + mtk_hdmi_i2s_data_fmt(hdmi, aud_param->aud_i2s_fmt); + mtk_hdmi_i2s_sck_edge_rise(hdmi, true); + mtk_hdmi_i2s_cbit_order(hdmi, CBIT_ORDER_SAME); + mtk_hdmi_i2s_vbit(hdmi, 0); /* PCM data */ + mtk_hdmi_i2s_data_direction(hdmi, 0); /* MSB first */ + mtk_hdmi_v2_hw_audio_type(hdmi, HDMI_AUD_INPUT_I2S); + i2s_ch_map = mtk_hdmi_v2_get_i2s_ch_mapping(hdmi, + aud_param->aud_input_chan_type); + mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, codec_params->channels, i2s_ch_map); + mtk_hdmi_v2_hw_i2s_ch_swap(hdmi); + } + } else { + if (codec_params->sample_rate == 768000 && + (aud_param->aud_codec == HDMI_AUDIO_CODING_TYPE_DTS_HD || + aud_param->aud_codec == HDMI_AUDIO_CODING_TYPE_MLP)) { + mtk_hdmi_hbr_config(hdmi, false); + } else { + mtk_hdmi_v2_hw_spdif_config(hdmi); + mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, 2, 0); + } + } +} + +static inline void mtk_hdmi_v2_hw_audio_input_enable(struct mtk_hdmi *hdmi, bool ena) +{ + if (ena) + regmap_set_bits(hdmi->regs, AIP_CTRL, AUD_IN_EN); + else + regmap_clear_bits(hdmi->regs, AIP_CTRL, AUD_IN_EN); +} + +static void mtk_hdmi_v2_aip_ctrl_init(struct mtk_hdmi *hdmi) +{ + regmap_set_bits(hdmi->regs, AIP_CTRL, + AUD_SEL_OWRT | NO_MCLK_CTSGEN_SEL | MCLK_EN | CTS_REQ_EN); + regmap_clear_bits(hdmi->regs, AIP_TPI_CTRL, TPI_AUDIO_LOOKUP_EN); +} + +static void mtk_hdmi_v2_audio_reset(struct mtk_hdmi *hdmi, bool reset) +{ + const u32 arst_bits = RST4AUDIO | RST4AUDIO_FIFO | RST4AUDIO_ACR; + + if (reset) + regmap_set_bits(hdmi->regs, AIP_TXCTRL, arst_bits); + else + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, arst_bits); +} + +static void mtk_hdmi_v2_aud_output_config(struct mtk_hdmi *hdmi, + struct drm_display_mode *display_mode) +{ + /* Shut down and reset the HDMI Audio HW to avoid glitching */ + mtk_hdmi_v2_hw_aud_mute(hdmi, true); + mtk_hdmi_v2_hw_aud_enable(hdmi, false); + mtk_hdmi_v2_audio_reset(hdmi, true); + + /* Configure the main hardware params and get out of reset */ + mtk_hdmi_v2_aip_ctrl_init(hdmi); + mtk_hdmi_v2_aud_set_input(hdmi); + mtk_hdmi_v2_hw_aud_set_channel_status(hdmi); + mtk_hdmi_v2_setup_audio_infoframe(hdmi); + mtk_hdmi_v2_hw_audio_input_enable(hdmi, true); + mtk_hdmi_v2_audio_reset(hdmi, false); + + /* Ignore N/CTS packet transmission requests and configure */ + mtk_hdmi_v2_hw_ncts_enable(hdmi, false); + mtk_hdmi_v2_hw_aud_set_ncts(hdmi, hdmi->aud_param.codec_params.sample_rate, + display_mode->clock); + + /* Wait for the HW to apply settings */ + usleep_range(25, 50); + + /* Hardware is fully configured: enable TX of N/CTS pkts and unmute */ + mtk_hdmi_v2_hw_ncts_enable(hdmi, true); + mtk_hdmi_v2_hw_aud_enable(hdmi, true); + mtk_hdmi_v2_hw_aud_mute(hdmi, false); +} + +static 
void mtk_hdmi_v2_change_video_resolution(struct mtk_hdmi *hdmi, + struct drm_connector_state *conn_state) +{ + mtk_hdmi_v2_hw_reset(hdmi); + mtk_hdmi_v2_set_sw_hpd(hdmi, true); + udelay(2); + + regmap_write(hdmi->regs, HDCP_TOP_CTRL, 0); + + /* + * Enable HDCP reauthentication interrupt: the HW uses this internally + * for the HPD state machine even if HDCP encryption is not enabled. + */ + regmap_set_bits(hdmi->regs, TOP_INT_ENABLE00, HDCP2X_RX_REAUTH_REQ_DDCM_INT); + + /* Enable hotplug and pord interrupts */ + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true); + + /* Force enabling HDCP HPD */ + regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR); + regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW); + + /* Set 8 bits per pixel */ + regmap_update_bits(hdmi->regs, TOP_CFG00, TMDS_PACK_MODE, + FIELD_PREP(TMDS_PACK_MODE, TMDS_PACK_MODE_8BPP)); + /* Disable generating deepcolor packets */ + regmap_clear_bits(hdmi->regs, TOP_CFG00, DEEPCOLOR_PKT_EN); + /* Disable adding deepcolor information to the general packet */ + regmap_clear_bits(hdmi->regs, TOP_MISC_CTLR, DEEP_COLOR_ADD); + + if (hdmi->curr_conn->display_info.is_hdmi) + regmap_set_bits(hdmi->regs, TOP_CFG00, HDMI_MODE_HDMI); + else + regmap_clear_bits(hdmi->regs, TOP_CFG00, HDMI_MODE_HDMI); + + udelay(5); + mtk_hdmi_v2_hw_vid_mute(hdmi, true); + mtk_hdmi_v2_hw_aud_mute(hdmi, true); + mtk_hdmi_v2_hw_gcp_avmute(hdmi, false); + + regmap_update_bits(hdmi->regs, TOP_CFG01, + NULL_PKT_VSYNC_HIGH_EN | NULL_PKT_EN, NULL_PKT_VSYNC_HIGH_EN); + usleep_range(100, 150); + + /* Enable scrambling if tmds clock is 340MHz or more */ + mtk_hdmi_v2_enable_scrambling(hdmi, hdmi->mode.clock >= 340 * KILO); + + switch (conn_state->hdmi.output_format) { + default: + case HDMI_COLORSPACE_RGB: + case HDMI_COLORSPACE_YUV444: + /* Disable YUV420 downsampling for RGB and YUV444 */ + mtk_hdmi_yuv420_downsampling(hdmi, false); + break; + case HDMI_COLORSPACE_YUV422: + /* + * YUV420 downsampling is special and needs a bit of setup + * so we disable everything there before doing anything else. + * + * YUV422 downsampling instead just needs one bit to be set. 
+ */ + mtk_hdmi_yuv420_downsampling(hdmi, false); + regmap_set_bits(hdmi->regs, VID_DOWNSAMPLE_CONFIG, + C444_C422_CONFIG_ENABLE); + break; + case HDMI_COLORSPACE_YUV420: + mtk_hdmi_yuv420_downsampling(hdmi, true); + break; + }; +} + +static void mtk_hdmi_v2_output_set_display_mode(struct mtk_hdmi *hdmi, + struct drm_connector_state *conn_state, + struct drm_display_mode *mode) +{ + union phy_configure_opts opts = { + .dp = { .link_rate = hdmi->mode.clock * KILO } + }; + int ret; + + ret = phy_configure(hdmi->phy, &opts); + if (ret) + dev_err(hdmi->dev, "Setting clock=%d failed: %d", mode->clock, ret); + + mtk_hdmi_v2_change_video_resolution(hdmi, conn_state); + mtk_hdmi_v2_aud_output_config(hdmi, mode); +} + +static int mtk_hdmi_v2_clk_enable(struct mtk_hdmi *hdmi) +{ + int ret; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]); + if (ret) + return ret; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]); + if (ret) + goto disable_hdcp_clk; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]); + if (ret) + goto disable_hdcp_24m_clk; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI]); + if (ret) + goto disable_bus_clk; + + return 0; + +disable_bus_clk: + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]); +disable_hdcp_24m_clk: + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]); +disable_hdcp_clk: + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]); + + return ret; +} + +static void mtk_hdmi_v2_clk_disable(struct mtk_hdmi *hdmi) +{ + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]); +} + +static enum hdmi_hpd_state mtk_hdmi_v2_hpd_pord_status(struct mtk_hdmi *hdmi) +{ + u8 hpd_pin_sta, pord_pin_sta; + u32 hpd_status; + + regmap_read(hdmi->regs, HPD_DDC_STATUS, &hpd_status); + hpd_pin_sta = FIELD_GET(HPD_PIN_STA, hpd_status); + pord_pin_sta = FIELD_GET(PORD_PIN_STA, hpd_status); + + /* + * Inform that the cable is plugged in (hpd_pin_sta) so that the + * sink can be powered on by switching the 5V VBUS as required by + * the HDMI spec for reading EDID and for HDMI Audio registers to + * be accessible. + * + * PORD detection succeeds only when the cable is plugged in and + * the sink is powered on: reaching that state means that the + * communication with the sink can be started. + * + * Please note that when the cable is plugged out the HPD pin will + * be the first one to fall, while PORD may still be in rise state + * for a few more milliseconds, so we decide HDMI_PLUG_OUT without + * checking PORD at all (we check only HPD falling for that). 
+ */ + if (hpd_pin_sta && pord_pin_sta) + return HDMI_PLUG_IN_AND_SINK_POWER_ON; + else if (hpd_pin_sta) + return HDMI_PLUG_IN_ONLY; + else + return HDMI_PLUG_OUT; +} + +static irqreturn_t mtk_hdmi_v2_isr(int irq, void *arg) +{ + struct mtk_hdmi *hdmi = arg; + unsigned int irq_sta; + int ret = IRQ_HANDLED; + + regmap_read(hdmi->regs, TOP_INT_STA00, &irq_sta); + + /* Handle Hotplug Detection interrupts */ + if (irq_sta & HPD_PORD_HWIRQS) { + /* + * Disable the HPD/PORD IRQs now and until thread done to + * avoid interrupt storm that could happen with bad cables + */ + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, false); + ret = IRQ_WAKE_THREAD; + + /* Clear HPD/PORD irqs to avoid unwanted retriggering */ + regmap_write(hdmi->regs, TOP_INT_CLR00, HPD_PORD_HWIRQS); + regmap_write(hdmi->regs, TOP_INT_CLR00, 0); + } + + return ret; +} + +static irqreturn_t __mtk_hdmi_v2_isr_thread(struct mtk_hdmi *hdmi) +{ + enum hdmi_hpd_state hpd; + + hpd = mtk_hdmi_v2_hpd_pord_status(hdmi); + if (hpd != hdmi->hpd) { + struct drm_encoder *encoder = hdmi->bridge.encoder; + + hdmi->hpd = hpd; + + if (encoder && encoder->dev) + drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev); + } + + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true); + return IRQ_HANDLED; +} + +static irqreturn_t mtk_hdmi_v2_isr_thread(int irq, void *arg) +{ + struct mtk_hdmi *hdmi = arg; + + /* + * Debounce HDMI monitor HPD status. + * Empirical testing shows that 30ms is enough wait + */ + msleep(30); + + return __mtk_hdmi_v2_isr_thread(hdmi); +} + +static int mtk_hdmi_v2_enable(struct mtk_hdmi *hdmi) +{ + bool was_active = pm_runtime_active(hdmi->dev); + int ret; + + ret = pm_runtime_resume_and_get(hdmi->dev); + if (ret) { + dev_err(hdmi->dev, "Cannot resume HDMI\n"); + return ret; + } + + ret = mtk_hdmi_v2_clk_enable(hdmi); + if (ret) { + pm_runtime_put(hdmi->dev); + return ret; + } + + if (!was_active) { + mtk_hdmi_v2_hw_reset(hdmi); + mtk_hdmi_v2_set_sw_hpd(hdmi, true); + } + + return 0; +} + +static void mtk_hdmi_v2_disable(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_v2_clk_disable(hdmi); + pm_runtime_put_sync(hdmi->dev); +} + +/* + * Bridge callbacks + */ + +static int mtk_hdmi_v2_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + int ret; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + DRM_ERROR("The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n"); + return -EINVAL; + } + if (hdmi->next_bridge) { + ret = drm_bridge_attach(encoder, hdmi->next_bridge, bridge, flags); + if (ret) + return ret; + } + + ret = mtk_hdmi_v2_enable(hdmi); + if (ret) + return ret; + + /* Enable Hotplug and Pord pins internal debouncing */ + regmap_set_bits(hdmi->regs, HPD_DDC_CTRL, + HPD_DDC_HPD_DBNC_EN | HPD_DDC_PORD_DBNC_EN); + + irq_clear_status_flags(hdmi->irq, IRQ_NOAUTOEN); + enable_irq(hdmi->irq); + + /* + * Check if any HDMI monitor was connected before probing this driver + * and/or attaching the bridge, without debouncing: if so, we want to + * notify the DRM so that we start outputting an image ASAP. + * Note that calling the ISR thread function will also perform a HW + * registers write that enables both the HPD and Pord interrupts. 
+ */ + __mtk_hdmi_v2_isr_thread(hdmi); + + mtk_hdmi_v2_disable(hdmi); + + return 0; +} + +static void mtk_hdmi_v2_bridge_detach(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + WARN_ON(pm_runtime_active(hdmi->dev)); + + /* The controller is already powered off, just disable irq here */ + disable_irq(hdmi->irq); +} + +static void mtk_hdmi_v2_handle_plugged_change(struct mtk_hdmi *hdmi, bool plugged) +{ + mutex_lock(&hdmi->update_plugged_status_lock); + if (hdmi->plugged_cb && hdmi->codec_dev) + hdmi->plugged_cb(hdmi->codec_dev, plugged); + mutex_unlock(&hdmi->update_plugged_status_lock); +} + +static void mtk_hdmi_v2_bridge_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + struct drm_connector_state *conn_state; + union phy_configure_opts opts = { + .dp = { .link_rate = hdmi->mode.clock * KILO } + }; + int ret; + + /* Power on the controller before trying to write to registers */ + ret = mtk_hdmi_v2_enable(hdmi); + if (WARN_ON(ret)) + return; + + /* Retrieve the connector through the atomic state */ + hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + + conn_state = drm_atomic_get_new_connector_state(state, hdmi->curr_conn); + if (WARN_ON(!conn_state)) + return; + + /* + * Preconfigure the HDMI controller and the HDMI PHY at pre_enable + * stage to make sure that this IP is ready and clocked before the + * mtk_dpi gets powered on and before it enables the output. + */ + mtk_hdmi_v2_output_set_display_mode(hdmi, conn_state, &hdmi->mode); + + /* Reconfigure phy clock link with appropriate rate */ + phy_configure(hdmi->phy, &opts); + + /* Power on the PHY here to make sure that DPI_HDMI is clocked */ + phy_power_on(hdmi->phy); + + hdmi->powered = true; +} + +static void mtk_hdmi_v2_bridge_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + int ret; + + if (WARN_ON(!hdmi->powered)) + return; + + ret = drm_atomic_helper_connector_hdmi_update_infoframes(hdmi->curr_conn, state); + if (ret) + dev_err(hdmi->dev, "Could not update infoframes: %d\n", ret); + + mtk_hdmi_v2_hw_vid_mute(hdmi, false); + + /* signal the connect event to audio codec */ + mtk_hdmi_v2_handle_plugged_change(hdmi, true); + + hdmi->enabled = true; +} + +static void mtk_hdmi_v2_bridge_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + if (!hdmi->enabled) + return; + + mtk_hdmi_v2_hw_gcp_avmute(hdmi, true); + msleep(50); + mtk_hdmi_v2_hw_vid_mute(hdmi, true); + mtk_hdmi_v2_hw_aud_mute(hdmi, true); + msleep(50); + + hdmi->enabled = false; +} + +static void mtk_hdmi_v2_bridge_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + if (!hdmi->powered) + return; + + phy_power_off(hdmi->phy); + hdmi->powered = false; + + /* signal the disconnect event to audio codec */ + mtk_hdmi_v2_handle_plugged_change(hdmi, false); + + /* Power off */ + mtk_hdmi_v2_disable(hdmi); +} + +static enum drm_connector_status mtk_hdmi_v2_bridge_detect(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + return hdmi->hpd != HDMI_PLUG_OUT ? 
+ connector_status_connected : connector_status_disconnected; +} + +static const struct drm_edid *mtk_hdmi_v2_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + return drm_edid_read(connector); +} + +static void mtk_hdmi_v2_hpd_enable(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + int ret; + + ret = mtk_hdmi_v2_enable(hdmi); + if (ret) { + dev_err(hdmi->dev, "Cannot power on controller for HPD: %d\n", ret); + return; + } + + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true); +} + +static void mtk_hdmi_v2_hpd_disable(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, false); + mtk_hdmi_v2_disable(hdmi); +} + +static int mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) +{ + if (mode->clock < MTK_HDMI_V2_CLOCK_MIN) + return MODE_CLOCK_LOW; + else if (mode->clock > MTK_HDMI_V2_CLOCK_MAX) + return MODE_CLOCK_HIGH; + else + return MODE_OK; +} + +static int mtk_hdmi_v2_hdmi_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + switch (type) { + case HDMI_INFOFRAME_TYPE_AUDIO: + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AUD_EN_WR | AUD_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN); + break; + case HDMI_INFOFRAME_TYPE_AVI: + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN); + break; + case HDMI_INFOFRAME_TYPE_SPD: + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN); + break; + case HDMI_INFOFRAME_TYPE_DRM: + default: + break; + }; + + return 0; +} + +static int mtk_hdmi_v2_hdmi_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + switch (type) { + case HDMI_INFOFRAME_TYPE_AUDIO: + mtk_hdmi_v2_hw_write_audio_infoframe(hdmi, buffer); + break; + case HDMI_INFOFRAME_TYPE_AVI: + mtk_hdmi_v2_hw_write_avi_infoframe(hdmi, buffer); + break; + case HDMI_INFOFRAME_TYPE_SPD: + mtk_hdmi_v2_hw_write_spd_infoframe(hdmi, buffer); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + mtk_hdmi_v2_hw_write_vendor_infoframe(hdmi, buffer); + break; + case HDMI_INFOFRAME_TYPE_DRM: + default: + dev_err(hdmi->dev, "Unsupported HDMI infoframe type %u\n", type); + break; + }; + + return 0; +} + +static int mtk_hdmi_v2_set_abist(struct mtk_hdmi *hdmi, bool enable) +{ + struct drm_display_mode *mode = &hdmi->mode; + int abist_format = -EINVAL; + bool interlaced; + + if (!enable) { + regmap_clear_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_ENABLE); + return 0; + } + + if (!mode->hdisplay || !mode->vdisplay) + return -EINVAL; + + interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + + switch (mode->hdisplay) { + case 720: + if (mode->vdisplay == 480) + abist_format = 2; + else if (mode->vdisplay == 576) + abist_format = 11; + break; + case 1280: + if (mode->vdisplay == 720) + abist_format = 3; + break; + case 1440: + if (mode->vdisplay == 480) + abist_format = interlaced ? 5 : 9; + else if (mode->vdisplay == 576) + abist_format = interlaced ? 
14 : 18; + break; + case 1920: + if (mode->vdisplay == 1080) + abist_format = interlaced ? 4 : 10; + break; + case 3840: + if (mode->vdisplay == 2160) + abist_format = 25; + break; + case 4096: + if (mode->vdisplay == 2160) + abist_format = 26; + break; + default: + break; + } + if (abist_format < 0) + return abist_format; + + regmap_update_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_VIDEO_FORMAT, + FIELD_PREP(HDMI_ABIST_VIDEO_FORMAT, abist_format)); + regmap_set_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_ENABLE); + return 0; +} + +static int mtk_hdmi_v2_debug_abist_show(struct seq_file *m, void *arg) +{ + struct mtk_hdmi *hdmi = m->private; + bool en; + u32 val; + int ret; + + if (!hdmi) + return -EINVAL; + + ret = regmap_read(hdmi->regs, TOP_CFG00, &val); + if (ret) + return ret; + + en = FIELD_GET(HDMI_ABIST_ENABLE, val); + + seq_printf(m, "HDMI Automated Built-In Self Test: %s\n", + en ? "Enabled" : "Disabled"); + + return 0; +} + +static ssize_t mtk_hdmi_v2_debug_abist_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + int ret; + u32 en; + + if (!m || !m->private || *offp) + return -EINVAL; + + ret = kstrtouint_from_user(ubuf, len, 0, &en); + if (ret) + return ret; + + if (en < 0 || en > 1) + return -EINVAL; + + mtk_hdmi_v2_set_abist((struct mtk_hdmi *)m->private, en); + return len; +} + +static int mtk_hdmi_v2_debug_abist_open(struct inode *inode, struct file *file) +{ + return single_open(file, mtk_hdmi_v2_debug_abist_show, inode->i_private); +} + +static const struct file_operations mtk_hdmi_debug_abist_fops = { + .owner = THIS_MODULE, + .open = mtk_hdmi_v2_debug_abist_open, + .read = seq_read, + .write = mtk_hdmi_v2_debug_abist_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static void mtk_hdmi_v2_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +{ + struct mtk_hdmi *dpi = hdmi_ctx_from_bridge(bridge); + + debugfs_create_file("hdmi_abist", 0640, root, dpi, &mtk_hdmi_debug_abist_fops); +} + +static const struct drm_bridge_funcs mtk_v2_hdmi_bridge_funcs = { + .attach = mtk_hdmi_v2_bridge_attach, + .detach = mtk_hdmi_v2_bridge_detach, + .mode_fixup = mtk_hdmi_bridge_mode_fixup, + .mode_set = mtk_hdmi_bridge_mode_set, + .atomic_pre_enable = mtk_hdmi_v2_bridge_pre_enable, + .atomic_enable = mtk_hdmi_v2_bridge_enable, + .atomic_disable = mtk_hdmi_v2_bridge_disable, + .atomic_post_disable = mtk_hdmi_v2_bridge_post_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .detect = mtk_hdmi_v2_bridge_detect, + .edid_read = mtk_hdmi_v2_bridge_edid_read, + .hpd_enable = mtk_hdmi_v2_hpd_enable, + .hpd_disable = mtk_hdmi_v2_hpd_disable, + .hdmi_tmds_char_rate_valid = mtk_hdmi_v2_hdmi_tmds_char_rate_valid, + .hdmi_clear_infoframe = mtk_hdmi_v2_hdmi_clear_infoframe, + .hdmi_write_infoframe = mtk_hdmi_v2_hdmi_write_infoframe, + .debugfs_init = mtk_hdmi_v2_debugfs_init, +}; + +/* + * HDMI audio codec callbacks + */ +static int mtk_hdmi_v2_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + bool plugged; + + if (!hdmi) + return -ENODEV; + + mtk_hdmi_audio_set_plugged_cb(hdmi, fn, codec_dev); + plugged = (hdmi->hpd == HDMI_PLUG_IN_AND_SINK_POWER_ON); + mtk_hdmi_v2_handle_plugged_change(hdmi, plugged); + + return 0; +} + +static int 
mtk_hdmi_v2_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + if (hdmi->audio_enable) { + mtk_hdmi_audio_params(hdmi, daifmt, params); + mtk_hdmi_v2_aud_output_config(hdmi, &hdmi->mode); + } + return 0; +} + +static int mtk_hdmi_v2_audio_startup(struct device *dev, void *data) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_v2_hw_aud_enable(hdmi, true); + hdmi->audio_enable = true; + + return 0; +} + +static void mtk_hdmi_v2_audio_shutdown(struct device *dev, void *data) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + hdmi->audio_enable = false; + mtk_hdmi_v2_hw_aud_enable(hdmi, false); +} + +static int mtk_hdmi_v2_audio_mute(struct device *dev, void *data, bool enable, int dir) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_v2_hw_aud_mute(hdmi, enable); + + return 0; +} + +static const struct hdmi_codec_ops mtk_hdmi_v2_audio_codec_ops = { + .hw_params = mtk_hdmi_v2_audio_hw_params, + .audio_startup = mtk_hdmi_v2_audio_startup, + .audio_shutdown = mtk_hdmi_v2_audio_shutdown, + .mute_stream = mtk_hdmi_v2_audio_mute, + .get_eld = mtk_hdmi_audio_get_eld, + .hook_plugged_cb = mtk_hdmi_v2_audio_hook_plugged_cb, +}; + +static __maybe_unused int mtk_hdmi_v2_suspend(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_v2_disable(hdmi); + + return 0; +} + +static __maybe_unused int mtk_hdmi_v2_resume(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + return mtk_hdmi_v2_enable(hdmi); +} + +static SIMPLE_DEV_PM_OPS(mtk_hdmi_v2_pm_ops, mtk_hdmi_v2_suspend, mtk_hdmi_v2_resume); + +static const struct mtk_hdmi_ver_conf mtk_hdmi_conf_v2 = { + .bridge_funcs = &mtk_v2_hdmi_bridge_funcs, + .codec_ops = &mtk_hdmi_v2_audio_codec_ops, + .mtk_hdmi_clock_names = mtk_hdmi_v2_clk_names, + .num_clocks = MTK_HDMI_V2_CLK_COUNT, + .interlace_allowed = true, +}; + +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8188 = { + .ver_conf = &mtk_hdmi_conf_v2, + .reg_hdmi_tx_cfg = HDMITX_CONFIG_MT8188 +}; + +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8195 = { + .ver_conf = &mtk_hdmi_conf_v2, + .reg_hdmi_tx_cfg = HDMITX_CONFIG_MT8195 +}; + +static int mtk_hdmi_v2_probe(struct platform_device *pdev) +{ + struct mtk_hdmi *hdmi; + int ret; + + /* Populate HDMI sub-devices if present */ + ret = devm_of_platform_populate(&pdev->dev); + if (ret) + return ret; + + hdmi = mtk_hdmi_common_probe(pdev); + if (IS_ERR(hdmi)) + return PTR_ERR(hdmi); + + hdmi->hpd = HDMI_PLUG_OUT; + + /* Disable all HW interrupts at probe stage */ + mtk_hdmi_v2_hwirq_disable(hdmi); + + /* + * In case bootloader leaves HDMI enabled before booting, make + * sure that any interrupt that was left is cleared by setting + * all bits in the INT_CLR registers for all 32+19 interrupts. + */ + regmap_write(hdmi->regs, TOP_INT_CLR00, GENMASK(31, 0)); + regmap_write(hdmi->regs, TOP_INT_CLR01, GENMASK(18, 0)); + + /* Restore interrupt clearing registers to zero */ + regmap_write(hdmi->regs, TOP_INT_CLR00, 0); + regmap_write(hdmi->regs, TOP_INT_CLR01, 0); + + /* + * Install the ISR but keep it disabled: as the interrupts are + * being set up in the .bridge_attach() callback which will + * enable both the right HW IRQs and the ISR. 
+ */ + irq_set_status_flags(hdmi->irq, IRQ_NOAUTOEN); + ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, mtk_hdmi_v2_isr, + mtk_hdmi_v2_isr_thread, + IRQ_TYPE_LEVEL_HIGH, + dev_name(&pdev->dev), hdmi); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n"); + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Cannot enable Runtime PM\n"); + + return 0; +} + +static void mtk_hdmi_v2_remove(struct platform_device *pdev) +{ + struct mtk_hdmi *hdmi = platform_get_drvdata(pdev); + + i2c_put_adapter(hdmi->ddc_adpt); +} + +static const struct of_device_id mtk_drm_hdmi_v2_of_ids[] = { + { .compatible = "mediatek,mt8188-hdmi-tx", .data = &mtk_hdmi_conf_mt8188 }, + { .compatible = "mediatek,mt8195-hdmi-tx", .data = &mtk_hdmi_conf_mt8195 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_v2_of_ids); + +static struct platform_driver mtk_hdmi_v2_driver = { + .probe = mtk_hdmi_v2_probe, + .remove = mtk_hdmi_v2_remove, + .driver = { + .name = "mediatek-drm-hdmi-v2", + .of_match_table = mtk_drm_hdmi_v2_of_ids, + .pm = &mtk_hdmi_v2_pm_ops, + }, +}; +module_platform_driver(mtk_hdmi_v2_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); +MODULE_DESCRIPTION("MediaTek HDMIv2 Driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("DRM_MTK_HDMI"); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 02349bd44001..5043e0377270 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -11,6 +11,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <linux/align.h> #include "mtk_crtc.h" @@ -21,9 +22,6 @@ static const u64 modifiers[] = { DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | - AFBC_FORMAT_MOD_SPLIT | - AFBC_FORMAT_MOD_SPARSE), DRM_FORMAT_MOD_INVALID, }; @@ -71,26 +69,7 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane, uint32_t format, uint64_t modifier) { - if (modifier == DRM_FORMAT_MOD_LINEAR) - return true; - - if (modifier != DRM_FORMAT_MOD_ARM_AFBC( - AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | - AFBC_FORMAT_MOD_SPLIT | - AFBC_FORMAT_MOD_SPARSE)) - return false; - - if (format != DRM_FORMAT_XRGB8888 && - format != DRM_FORMAT_ARGB8888 && - format != DRM_FORMAT_BGRX8888 && - format != DRM_FORMAT_BGRA8888 && - format != DRM_FORMAT_ABGR8888 && - format != DRM_FORMAT_XBGR8888 && - format != DRM_FORMAT_RGB888 && - format != DRM_FORMAT_BGR888) - return false; - - return true; + return modifier == DRM_FORMAT_MOD_LINEAR; } static void mtk_plane_destroy_state(struct drm_plane *plane, @@ -122,7 +101,8 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane, if (ret) return ret; - crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); return drm_atomic_helper_check_plane_state(plane->state, crtc_state, DRM_PLANE_NO_SCALING, diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c index 7f98de38842b..783572b16963 100644 --- a/drivers/gpu/drm/meson/meson_overlay.c +++ b/drivers/gpu/drm/meson/meson_overlay.c @@ -16,6 +16,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include "meson_overlay.h" #include "meson_registers.h" diff --git 
a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index b43ac61201f3..f8d0e0874a5d 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -20,6 +20,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include "meson_plane.h" #include "meson_registers.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 32cd8ac018c0..a32be27c39e8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -20,6 +20,7 @@ #include <drm/drm_managed.h> #include <drm/drm_module.h> #include <drm/drm_pciids.h> +#include <drm/drm_print.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c index f874e2949840..a5e291b344db 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200.c @@ -7,6 +7,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c index e2305f8e00f8..d2aa931f579d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c @@ -7,6 +7,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c index 11ae76eb081d..7bea7a728f56 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh5.c b/drivers/gpu/drm/mgag200/mgag200_g200eh5.c index e2a2942a80a0..36da6529d74f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh5.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh5.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c index 23debc70dc54..8fa8fe943abf 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200er.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c @@ -7,6 +7,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c index f8796e2b7a0f..3fadbeb10af9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c @@ -7,6 +7,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c index 31624c9ab7b7..e387a455eae5 100644 --- 
a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c index e80da12ba1fe..a0ac19ee0353 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200se.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c @@ -7,6 +7,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c index a0e7b9ad46cd..d847fa8ded8c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c @@ -7,6 +7,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_vga.c b/drivers/gpu/drm/mgag200/mgag200_vga.c index 60568f32736d..b07c1362ddd4 100644 --- a/drivers/gpu/drm/mgag200/mgag200_vga.c +++ b/drivers/gpu/drm/mgag200/mgag200_vga.c @@ -2,6 +2,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_ddc.h" diff --git a/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c index a5a3ac108bd5..a855f1734316 100644 --- a/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c +++ b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c @@ -3,6 +3,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "mgag200_ddc.h" diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 0c0dfb25f01b..8aa7d07303fb 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -24,6 +24,7 @@ adreno-y := \ adreno/a6xx_gmu.o \ adreno/a6xx_hfi.o \ adreno/a6xx_preempt.o \ + adreno/a8xx_gpu.o \ adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ @@ -201,6 +202,7 @@ ADRENO_HEADERS = \ generated/a6xx_perfcntrs.xml.h \ generated/a7xx_enums.xml.h \ generated/a7xx_perfcntrs.xml.h \ + generated/a8xx_enums.xml.h \ generated/a6xx_gmu.xml.h \ generated/adreno_common.xml.h \ generated/adreno_pm4.xml.h \ diff --git a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c index 5ddd015f930d..e9dbf3ddf89e 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c @@ -7,6 +7,7 @@ */ #include "adreno_gpu.h" +#include "a2xx_gpu.h" static const struct adreno_info a2xx_gpus[] = { { @@ -19,7 +20,7 @@ static const struct adreno_info a2xx_gpus[] = { }, .gmem = SZ_256K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a2xx_gpu_init, + .funcs = &a2xx_gpu_funcs, }, { /* a200 on i.mx51 has only 128kib gmem */ .chip_ids = ADRENO_CHIP_IDS(0x02000001), .family = ADRENO_2XX_GEN1, @@ -30,7 +31,7 @@ static const struct adreno_info a2xx_gpus[] = { }, .gmem = SZ_128K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a2xx_gpu_init, + .funcs = &a2xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS(0x02020000), .family = ADRENO_2XX_GEN2, @@ -41,7 +42,7 @@ static const struct 
adreno_info a2xx_gpus[] = { }, .gmem = SZ_512K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a2xx_gpu_init, + .funcs = &a2xx_gpu_funcs, } }; DECLARE_ADRENO_GPULIST(a2xx); diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index ec38db45d8a3..1b1ee14b65cf 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -234,7 +234,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu) * word (0x20xxxx for A200, 0x220xxx for A220, 0x225xxx for A225). * Older firmware files, which lack protection support, have 0 instead. */ - if (ptr[1] == 0) { + if (ptr[1] == 0 && !a2xx_gpu->protection_disabled) { dev_warn(gpu->dev->dev, "Legacy firmware detected, disabling protection support\n"); a2xx_gpu->protection_disabled = true; @@ -486,39 +486,18 @@ static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) return ring->memptrs->rptr; } -static const struct adreno_gpu_funcs funcs = { - .base = { - .get_param = adreno_get_param, - .set_param = adreno_set_param, - .hw_init = a2xx_hw_init, - .pm_suspend = msm_gpu_pm_suspend, - .pm_resume = msm_gpu_pm_resume, - .recover = a2xx_recover, - .submit = a2xx_submit, - .active_ring = adreno_active_ring, - .irq = a2xx_irq, - .destroy = a2xx_destroy, -#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) - .show = adreno_show, -#endif - .gpu_state_get = a2xx_gpu_state_get, - .gpu_state_put = adreno_gpu_state_put, - .create_vm = a2xx_create_vm, - .get_rptr = a2xx_get_rptr, - }, -}; - static const struct msm_gpu_perfcntr perfcntrs[] = { /* TODO */ }; -struct msm_gpu *a2xx_gpu_init(struct drm_device *dev) +static struct msm_gpu *a2xx_gpu_init(struct drm_device *dev) { struct a2xx_gpu *a2xx_gpu = NULL; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; + struct adreno_platform_config *config = pdev->dev.platform_data; int ret; if (!pdev) { @@ -539,7 +518,7 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev) gpu->perfcntrs = perfcntrs; gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs); - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1); if (ret) goto fail; @@ -558,3 +537,26 @@ fail: return ERR_PTR(ret); } + +const struct adreno_gpu_funcs a2xx_gpu_funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a2xx_hw_init, + .pm_suspend = msm_gpu_pm_suspend, + .pm_resume = msm_gpu_pm_resume, + .recover = a2xx_recover, + .submit = a2xx_submit, + .active_ring = adreno_active_ring, + .irq = a2xx_irq, + .destroy = a2xx_destroy, +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) + .show = adreno_show, +#endif + .gpu_state_get = a2xx_gpu_state_get, + .gpu_state_put = adreno_gpu_state_put, + .create_vm = a2xx_create_vm, + .get_rptr = a2xx_get_rptr, + }, + .init = a2xx_gpu_init, +}; diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h index 53702f19990f..162ef98951f5 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h @@ -19,6 +19,8 @@ struct a2xx_gpu { }; #define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base) +extern const struct adreno_gpu_funcs a2xx_gpu_funcs; + struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu); void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base, dma_addr_t *tran_error); diff --git 
a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c index 1498e6532f62..6ae8716fc08a 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c @@ -7,6 +7,7 @@ */ #include "adreno_gpu.h" +#include "a3xx_gpu.h" static const struct adreno_info a3xx_gpus[] = { { @@ -18,7 +19,7 @@ static const struct adreno_info a3xx_gpus[] = { }, .gmem = SZ_128K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a3xx_gpu_init, + .funcs = &a3xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS(0x03000520), .family = ADRENO_3XX, @@ -29,7 +30,7 @@ static const struct adreno_info a3xx_gpus[] = { }, .gmem = SZ_256K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a3xx_gpu_init, + .funcs = &a3xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS(0x03000600), .family = ADRENO_3XX, @@ -40,7 +41,7 @@ static const struct adreno_info a3xx_gpus[] = { }, .gmem = SZ_128K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a3xx_gpu_init, + .funcs = &a3xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS(0x03000620), .family = ADRENO_3XX, @@ -51,7 +52,7 @@ static const struct adreno_info a3xx_gpus[] = { }, .gmem = SZ_128K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a3xx_gpu_init, + .funcs = &a3xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS( 0x03020000, @@ -66,7 +67,7 @@ static const struct adreno_info a3xx_gpus[] = { }, .gmem = SZ_512K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a3xx_gpu_init, + .funcs = &a3xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS( 0x03030000, @@ -81,7 +82,7 @@ static const struct adreno_info a3xx_gpus[] = { }, .gmem = SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a3xx_gpu_init, + .funcs = &a3xx_gpu_funcs, } }; DECLARE_ADRENO_GPULIST(a3xx); diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index a956cd79195e..f22d33e99e81 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -508,29 +508,6 @@ static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) return ring->memptrs->rptr; } -static const struct adreno_gpu_funcs funcs = { - .base = { - .get_param = adreno_get_param, - .set_param = adreno_set_param, - .hw_init = a3xx_hw_init, - .pm_suspend = msm_gpu_pm_suspend, - .pm_resume = msm_gpu_pm_resume, - .recover = a3xx_recover, - .submit = a3xx_submit, - .active_ring = adreno_active_ring, - .irq = a3xx_irq, - .destroy = a3xx_destroy, -#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) - .show = adreno_show, -#endif - .gpu_busy = a3xx_gpu_busy, - .gpu_state_get = a3xx_gpu_state_get, - .gpu_state_put = adreno_gpu_state_put, - .create_vm = adreno_create_vm, - .get_rptr = a3xx_get_rptr, - }, -}; - static const struct msm_gpu_perfcntr perfcntrs[] = { { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO, SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" }, @@ -538,13 +515,14 @@ static const struct msm_gpu_perfcntr perfcntrs[] = { SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" }, }; -struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) +static struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) { struct a3xx_gpu *a3xx_gpu = NULL; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; + struct adreno_platform_config *config = pdev->dev.platform_data; struct icc_path *ocmem_icc_path; struct icc_path *icc_path; int ret; @@ -569,7 +547,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) adreno_gpu->registers 
= a3xx_registers; - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1); if (ret) goto fail; @@ -613,3 +591,27 @@ fail: return ERR_PTR(ret); } + +const struct adreno_gpu_funcs a3xx_gpu_funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a3xx_hw_init, + .pm_suspend = msm_gpu_pm_suspend, + .pm_resume = msm_gpu_pm_resume, + .recover = a3xx_recover, + .submit = a3xx_submit, + .active_ring = adreno_active_ring, + .irq = a3xx_irq, + .destroy = a3xx_destroy, +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) + .show = adreno_show, +#endif + .gpu_busy = a3xx_gpu_busy, + .gpu_state_get = a3xx_gpu_state_get, + .gpu_state_put = adreno_gpu_state_put, + .create_vm = adreno_create_vm, + .get_rptr = a3xx_get_rptr, + }, + .init = a3xx_gpu_init, +}; diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h index c555fb13e0d7..3d4ec9dbd918 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h @@ -23,4 +23,6 @@ struct a3xx_gpu { }; #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) +extern const struct adreno_gpu_funcs a3xx_gpu_funcs; + #endif /* __A3XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c index 09f9f228b75e..9192586f7ef0 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c @@ -7,6 +7,7 @@ */ #include "adreno_gpu.h" +#include "a4xx_gpu.h" static const struct adreno_info a4xx_gpus[] = { { @@ -19,7 +20,7 @@ static const struct adreno_info a4xx_gpus[] = { }, .gmem = SZ_256K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a4xx_gpu_init, + .funcs = &a4xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS(0x04020000), .family = ADRENO_4XX, @@ -30,7 +31,7 @@ static const struct adreno_info a4xx_gpus[] = { }, .gmem = (SZ_1M + SZ_512K), .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a4xx_gpu_init, + .funcs = &a4xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS(0x04030002), .family = ADRENO_4XX, @@ -41,7 +42,7 @@ static const struct adreno_info a4xx_gpus[] = { }, .gmem = (SZ_1M + SZ_512K), .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a4xx_gpu_init, + .funcs = &a4xx_gpu_funcs, } }; DECLARE_ADRENO_GPULIST(a4xx); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 83f6329accba..db06c06067ae 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -627,37 +627,14 @@ static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) return ring->memptrs->rptr; } -static const struct adreno_gpu_funcs funcs = { - .base = { - .get_param = adreno_get_param, - .set_param = adreno_set_param, - .hw_init = a4xx_hw_init, - .pm_suspend = a4xx_pm_suspend, - .pm_resume = a4xx_pm_resume, - .recover = a4xx_recover, - .submit = a4xx_submit, - .active_ring = adreno_active_ring, - .irq = a4xx_irq, - .destroy = a4xx_destroy, -#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) - .show = adreno_show, -#endif - .gpu_busy = a4xx_gpu_busy, - .gpu_state_get = a4xx_gpu_state_get, - .gpu_state_put = adreno_gpu_state_put, - .create_vm = adreno_create_vm, - .get_rptr = a4xx_get_rptr, - }, - .get_timestamp = a4xx_get_timestamp, -}; - -struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) +static struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) { struct a4xx_gpu *a4xx_gpu = NULL; struct 
adreno_gpu *adreno_gpu; struct msm_gpu *gpu; struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; + struct adreno_platform_config *config = pdev->dev.platform_data; struct icc_path *ocmem_icc_path; struct icc_path *icc_path; int ret; @@ -680,7 +657,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) gpu->perfcntrs = NULL; gpu->num_perfcntrs = 0; - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1); if (ret) goto fail; @@ -726,3 +703,28 @@ fail: return ERR_PTR(ret); } + +const struct adreno_gpu_funcs a4xx_gpu_funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a4xx_hw_init, + .pm_suspend = a4xx_pm_suspend, + .pm_resume = a4xx_pm_resume, + .recover = a4xx_recover, + .submit = a4xx_submit, + .active_ring = adreno_active_ring, + .irq = a4xx_irq, + .destroy = a4xx_destroy, +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) + .show = adreno_show, +#endif + .gpu_busy = a4xx_gpu_busy, + .gpu_state_get = a4xx_gpu_state_get, + .gpu_state_put = adreno_gpu_state_put, + .create_vm = adreno_create_vm, + .get_rptr = a4xx_get_rptr, + }, + .init = a4xx_gpu_init, + .get_timestamp = a4xx_get_timestamp, +}; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h index a01448cba2ea..71b164439f62 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h @@ -20,4 +20,6 @@ struct a4xx_gpu { }; #define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base) +extern const struct adreno_gpu_funcs a4xx_gpu_funcs; + #endif /* __A4XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c index b48a636d8237..babd320f3b73 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c @@ -7,6 +7,7 @@ */ #include "adreno_gpu.h" +#include "a5xx_gpu.h" static const struct adreno_info a5xx_gpus[] = { { @@ -21,7 +22,7 @@ static const struct adreno_info a5xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | ADRENO_QUIRK_LMLOADKILL_DISABLE, - .init = a5xx_gpu_init, + .funcs = &a5xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS(0x05000600), .family = ADRENO_5XX, @@ -38,7 +39,7 @@ static const struct adreno_info a5xx_gpus[] = { .inactive_period = 250, .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | ADRENO_QUIRK_LMLOADKILL_DISABLE, - .init = a5xx_gpu_init, + .funcs = &a5xx_gpu_funcs, .zapfw = "a506_zap.mdt", }, { .chip_ids = ADRENO_CHIP_IDS(0x05000800), @@ -55,7 +56,7 @@ static const struct adreno_info a5xx_gpus[] = { */ .inactive_period = 250, .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, - .init = a5xx_gpu_init, + .funcs = &a5xx_gpu_funcs, .zapfw = "a508_zap.mdt", }, { .chip_ids = ADRENO_CHIP_IDS(0x05000900), @@ -72,7 +73,7 @@ static const struct adreno_info a5xx_gpus[] = { */ .inactive_period = 250, .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, - .init = a5xx_gpu_init, + .funcs = &a5xx_gpu_funcs, /* Adreno 509 uses the same ZAP as 512 */ .zapfw = "a512_zap.mdt", }, { @@ -89,7 +90,7 @@ static const struct adreno_info a5xx_gpus[] = { * the GDSC which appears to make it grumpy */ .inactive_period = 250, - .init = a5xx_gpu_init, + .funcs = &a5xx_gpu_funcs, }, { .chip_ids = ADRENO_CHIP_IDS(0x05010200), .family = ADRENO_5XX, @@ -105,7 +106,7 @@ static const struct adreno_info a5xx_gpus[] = { */ .inactive_period = 250, .quirks = 
ADRENO_QUIRK_LMLOADKILL_DISABLE, - .init = a5xx_gpu_init, + .funcs = &a5xx_gpu_funcs, .zapfw = "a512_zap.mdt", }, { .chip_ids = ADRENO_CHIP_IDS( @@ -127,7 +128,7 @@ static const struct adreno_info a5xx_gpus[] = { .inactive_period = 250, .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | ADRENO_QUIRK_FAULT_DETECT_MASK, - .init = a5xx_gpu_init, + .funcs = &a5xx_gpu_funcs, .zapfw = "a530_zap.mdt", }, { .chip_ids = ADRENO_CHIP_IDS(0x05040001), @@ -145,7 +146,7 @@ static const struct adreno_info a5xx_gpus[] = { */ .inactive_period = 250, .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, - .init = a5xx_gpu_init, + .funcs = &a5xx_gpu_funcs, .zapfw = "a540_zap.mdt", } }; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 4a04dc43a8e6..56eaff2ee4e4 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1691,34 +1691,6 @@ static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR); } -static const struct adreno_gpu_funcs funcs = { - .base = { - .get_param = adreno_get_param, - .set_param = adreno_set_param, - .hw_init = a5xx_hw_init, - .ucode_load = a5xx_ucode_load, - .pm_suspend = a5xx_pm_suspend, - .pm_resume = a5xx_pm_resume, - .recover = a5xx_recover, - .submit = a5xx_submit, - .active_ring = a5xx_active_ring, - .irq = a5xx_irq, - .destroy = a5xx_destroy, -#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) - .show = a5xx_show, -#endif -#if defined(CONFIG_DEBUG_FS) - .debugfs_init = a5xx_debugfs_init, -#endif - .gpu_busy = a5xx_gpu_busy, - .gpu_state_get = a5xx_gpu_state_get, - .gpu_state_put = a5xx_gpu_state_put, - .create_vm = adreno_create_vm, - .get_rptr = a5xx_get_rptr, - }, - .get_timestamp = a5xx_get_timestamp, -}; - static void check_speed_bin(struct device *dev) { struct nvmem_cell *cell; @@ -1751,7 +1723,7 @@ static void check_speed_bin(struct device *dev) devm_pm_opp_set_supported_hw(dev, &val, 1); } -struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) +static struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; @@ -1781,7 +1753,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) if (config->info->revn == 510) nr_rings = 1; - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); @@ -1806,3 +1778,32 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) return gpu; } + +const struct adreno_gpu_funcs a5xx_gpu_funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a5xx_hw_init, + .ucode_load = a5xx_ucode_load, + .pm_suspend = a5xx_pm_suspend, + .pm_resume = a5xx_pm_resume, + .recover = a5xx_recover, + .submit = a5xx_submit, + .active_ring = a5xx_active_ring, + .irq = a5xx_irq, + .destroy = a5xx_destroy, +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) + .show = a5xx_show, +#endif +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = a5xx_debugfs_init, +#endif + .gpu_busy = a5xx_gpu_busy, + .gpu_state_get = a5xx_gpu_state_get, + .gpu_state_put = a5xx_gpu_state_put, + .create_vm = adreno_create_vm, + .get_rptr = a5xx_get_rptr, + }, + .init = a5xx_gpu_init, + .get_timestamp = a5xx_get_timestamp, +}; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 
9c0d701fe4b8..407bb950d350 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -133,6 +133,7 @@ struct a5xx_preempt_record { */ #define A5XX_PREEMPT_COUNTER_SIZE (16 * 4) +extern const struct adreno_gpu_funcs a5xx_gpu_funcs; int a5xx_power_init(struct msm_gpu *gpu); void a5xx_gpmu_ucode_init(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index 44df6410bce1..29107b362346 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -672,6 +672,14 @@ static const u32 a690_protect_regs[] = { }; DECLARE_ADRENO_PROTECT(a690_protect, 48); +static const struct adreno_reglist a640_gbif[] = { + { REG_A6XX_GBIF_QSB_SIDE0, 0x00071620 }, + { REG_A6XX_GBIF_QSB_SIDE1, 0x00071620 }, + { REG_A6XX_GBIF_QSB_SIDE2, 0x00071620 }, + { REG_A6XX_GBIF_QSB_SIDE3, 0x00071620 }, + { }, +}; + static const struct adreno_info a6xx_gpus[] = { { .chip_ids = ADRENO_CHIP_IDS(0x06010000), @@ -683,11 +691,12 @@ static const struct adreno_info a6xx_gpus[] = { .gmem = (SZ_128K + SZ_4K), .quirks = ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a6xx_gpu_init, + .funcs = &a6xx_gmuwrapper_funcs, .zapfw = "a610_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a612_hwcg, .protect = &a630_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020202, .prim_fifo_threshold = 0x00080000, }, @@ -706,6 +715,22 @@ static const struct adreno_info a6xx_gpus[] = { { 127, 4 }, ), }, { + .chip_ids = ADRENO_CHIP_IDS(0x06010200), + .family = ADRENO_6XX_GEN1, + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a612_rgmu.bin", + }, + .gmem = (SZ_128K + SZ_4K), + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .funcs = &a6xx_gmuwrapper_funcs, + .a6xx = &(const struct a6xx_info) { + .hwcg = a612_hwcg, + .protect = &a630_protect, + .gmu_cgc_mode = 0x00000022, + .prim_fifo_threshold = 0x00080000, + }, + }, { .chip_ids = ADRENO_CHIP_IDS(0x06010500), .family = ADRENO_6XX_GEN1, .revn = 615, @@ -716,7 +741,7 @@ static const struct adreno_info a6xx_gpus[] = { .gmem = SZ_512K, .quirks = ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a615_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a615_hwcg, @@ -747,7 +772,7 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_4GB_VA, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a615_zap.mbn", .a6xx = &(const struct a6xx_info) { .hwcg = a615_hwcg, @@ -774,7 +799,7 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_4GB_VA, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .a6xx = &(const struct a6xx_info) { .protect = &a630_protect, .gmu_cgc_mode = 0x00000222, @@ -797,7 +822,7 @@ static const struct adreno_info a6xx_gpus[] = { .gmem = SZ_512K, .quirks = ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a615_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a615_hwcg, @@ -822,7 +847,7 @@ static const struct adreno_info a6xx_gpus[] = { .gmem = SZ_512K, .quirks = ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a615_zap.mdt", .a6xx = &(const struct a6xx_info) 
{ .hwcg = a615_hwcg, @@ -847,7 +872,7 @@ static const struct adreno_info a6xx_gpus[] = { .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a615_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a615_hwcg, @@ -873,11 +898,12 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a620_zap.mbn", .a6xx = &(const struct a6xx_info) { .hwcg = a620_hwcg, .protect = &a650_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020200, .prim_fifo_threshold = 0x00010000, }, @@ -896,10 +922,11 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .a6xx = &(const struct a6xx_info) { .hwcg = a690_hwcg, .protect = &a650_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020200, .prim_fifo_threshold = 0x00010000, .bcms = (const struct a6xx_bcm[]) { @@ -933,7 +960,7 @@ static const struct adreno_info a6xx_gpus[] = { .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a630_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a630_hwcg, @@ -953,7 +980,7 @@ static const struct adreno_info a6xx_gpus[] = { .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a640_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a640_hwcg, @@ -977,11 +1004,12 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a650_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a650_hwcg, .protect = &a650_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020202, .prim_fifo_threshold = 0x00300200, }, @@ -1003,11 +1031,12 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a660_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a660_hwcg, .protect = &a660_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020000, .prim_fifo_threshold = 0x00300200, }, @@ -1022,10 +1051,11 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .a6xx = &(const struct a6xx_info) { .hwcg = a690_hwcg, .protect = &a660_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020200, .prim_fifo_threshold = 0x00300200, }, @@ -1045,11 +1075,12 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a660_zap.mbn", .a6xx = &(const struct a6xx_info) { .hwcg = a660_hwcg, .protect = &a660_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020202, .prim_fifo_threshold = 0x00200200, }, 
@@ -1072,7 +1103,7 @@ static const struct adreno_info a6xx_gpus[] = { .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a640_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a640_hwcg, @@ -1091,11 +1122,12 @@ static const struct adreno_info a6xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV, - .init = a6xx_gpu_init, + .funcs = &a6xx_gpu_funcs, .zapfw = "a690_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a690_hwcg, .protect = &a690_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020200, .prim_fifo_threshold = 0x00800200, }, @@ -1426,11 +1458,12 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = SZ_128K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_HW_APRIV, - .init = a6xx_gpu_init, + .funcs = &a6xx_gmuwrapper_funcs, .zapfw = "a702_zap.mbn", .a6xx = &(const struct a6xx_info) { .hwcg = a702_hwcg, .protect = &a650_protect, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020202, .prim_fifo_threshold = 0x0000c000, }, @@ -1452,12 +1485,13 @@ static const struct adreno_info a7xx_gpus[] = { .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV | ADRENO_QUIRK_PREEMPTION, - .init = a6xx_gpu_init, + .funcs = &a7xx_gpu_funcs, .zapfw = "a730_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a730_hwcg, .protect = &a730_protect, .pwrup_reglist = &a7xx_pwrup_reglist, + .gbif_cx = a640_gbif, .gmu_cgc_mode = 0x00020000, }, .preempt_record_size = 2860 * SZ_1K, @@ -1473,12 +1507,13 @@ static const struct adreno_info a7xx_gpus[] = { .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV | ADRENO_QUIRK_PREEMPTION, - .init = a6xx_gpu_init, + .funcs = &a7xx_gpu_funcs, .zapfw = "a740_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, .pwrup_reglist = &a7xx_pwrup_reglist, + .gbif_cx = a640_gbif, .gmu_chipid = 0x7020100, .gmu_cgc_mode = 0x00020202, .bcms = (const struct a6xx_bcm[]) { @@ -1507,12 +1542,13 @@ static const struct adreno_info a7xx_gpus[] = { ADRENO_QUIRK_HAS_HW_APRIV | ADRENO_QUIRK_PREEMPTION | ADRENO_QUIRK_IFPC, - .init = a6xx_gpu_init, + .funcs = &a7xx_gpu_funcs, .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, .pwrup_reglist = &a7xx_pwrup_reglist, .ifpc_reglist = &a750_ifpc_reglist, + .gbif_cx = a640_gbif, .gmu_chipid = 0x7050001, .gmu_cgc_mode = 0x00020202, .bcms = (const struct a6xx_bcm[]) { @@ -1548,12 +1584,13 @@ static const struct adreno_info a7xx_gpus[] = { ADRENO_QUIRK_HAS_HW_APRIV | ADRENO_QUIRK_PREEMPTION | ADRENO_QUIRK_IFPC, - .init = a6xx_gpu_init, + .funcs = &a7xx_gpu_funcs, .zapfw = "gen70900_zap.mbn", .a6xx = &(const struct a6xx_info) { .protect = &a730_protect, .pwrup_reglist = &a7xx_pwrup_reglist, .ifpc_reglist = &a750_ifpc_reglist, + .gbif_cx = a640_gbif, .gmu_chipid = 0x7090100, .gmu_cgc_mode = 0x00020202, .bcms = (const struct a6xx_bcm[]) { @@ -1581,11 +1618,12 @@ static const struct adreno_info a7xx_gpus[] = { .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV | ADRENO_QUIRK_PREEMPTION, - .init = a6xx_gpu_init, + .funcs = &a7xx_gpu_funcs, .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, .pwrup_reglist = &a7xx_pwrup_reglist, + .gbif_cx = a640_gbif, .gmu_chipid = 0x70f0000, .gmu_cgc_mode = 0x00020222, .bcms = (const struct a6xx_bcm[]) { @@ -1612,6 +1650,306 @@ static const 
struct adreno_info a7xx_gpus[] = { }; DECLARE_ADRENO_GPULIST(a7xx); +static const struct adreno_reglist_pipe x285_nonctxt_regs[] = { + { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) }, + { REG_A8XX_GRAS_DBG_ECO_CNTL, 0x00000800, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0x00200000, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_VIS_STREAM_CNTL, 0x10010000, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0x00000002, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_1, 0x00000003, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_2, 0x00000200, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_3, 0x00500000, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_4, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_RB_GC_GMEM_PROTECT, 0x15000000, BIT(PIPE_BR) }, + { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0x00000007, BIT(PIPE_BR) }, + { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0x00004000, BIT(PIPE_BR) }, + { REG_A8XX_RBBM_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_SLICE_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL, 0x00000030, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL2, 0x00000030, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, 0x0fffffff, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x22122212, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_CGC_P2S_CNTL, 0x00000040, BIT(PIPE_NONE) }, + { REG_A7XX_SP_CHICKEN_BITS_2, 0x00820800, BIT(PIPE_NONE) }, + { REG_A7XX_SP_CHICKEN_BITS_3, 0x00300000, BIT(PIPE_NONE) }, + { REG_A6XX_SP_PERFCTR_SHADER_MASK, 0x0000003f, BIT(PIPE_NONE) }, + /* Disable CS dead batch merge */ + { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_2, BIT(31), BIT(PIPE_NONE) }, + { REG_A7XX_SP_HLSQ_TIMEOUT_THRESHOLD_DP, 0x00000080, BIT(PIPE_NONE) }, + { REG_A7XX_SP_READ_SEL, 0x0001ff00, BIT(PIPE_NONE) }, + { REG_A6XX_TPL1_DBG_ECO_CNTL, 0x10000000, BIT(PIPE_NONE) }, + /* BIT(26): Disable final clamp for bicubic filtering */ + { REG_A6XX_TPL1_DBG_ECO_CNTL1, 0x00000720, BIT(PIPE_NONE) }, + { REG_A6XX_UCHE_MODE_CNTL, 0x80080000, BIT(PIPE_NONE) }, + { REG_A8XX_UCHE_CCHE_MODE_CNTL, 0x00001000, BIT(PIPE_NONE) }, + { REG_A8XX_UCHE_CCHE_CACHE_WAYS, 0x00000800, BIT(PIPE_NONE) }, + { REG_A8XX_UCHE_GBIF_GX_CONFIG, 0x010240e0, BIT(PIPE_NONE) }, + { REG_A8XX_UCHE_VARB_IDLE_TIMEOUT, 0x00000020, BIT(PIPE_NONE) }, + { REG_A7XX_VFD_DBG_ECO_CNTL, 0x00008000, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BV_THRESHOLD, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BR_THRESHOLD, 0x00600060, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0x00200020, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_LP_REQ_CNT, 0x00000020, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { }, +}; + +static const u32 x285_protect_regs[] = { + A6XX_PROTECT_RDONLY(0x00008, 0x039b), + A6XX_PROTECT_RDONLY(0x003b4, 0x008b), + A6XX_PROTECT_NORDWR(0x00440, 0x001f), + A6XX_PROTECT_RDONLY(0x00580, 0x005f), + A6XX_PROTECT_NORDWR(0x005e0, 0x011f), + A6XX_PROTECT_RDONLY(0x0074a, 0x0005), + A6XX_PROTECT_RDONLY(0x00759, 0x0026), + A6XX_PROTECT_RDONLY(0x00789, 0x0000), + A6XX_PROTECT_RDONLY(0x0078c, 0x0013), + A6XX_PROTECT_NORDWR(0x00800, 0x0029), + A6XX_PROTECT_NORDWR(0x0082c, 0x0000), + A6XX_PROTECT_NORDWR(0x00837, 0x00af), + A6XX_PROTECT_RDONLY(0x008e7, 0x00c9), + 
A6XX_PROTECT_NORDWR(0x008ec, 0x00c3), + A6XX_PROTECT_NORDWR(0x009b1, 0x0250), + A6XX_PROTECT_RDONLY(0x00ce0, 0x0001), + A6XX_PROTECT_RDONLY(0x00df0, 0x0000), + A6XX_PROTECT_NORDWR(0x00df1, 0x0000), + A6XX_PROTECT_NORDWR(0x00e01, 0x0000), + A6XX_PROTECT_NORDWR(0x00e03, 0x1fff), + A6XX_PROTECT_NORDWR(0x03c00, 0x00c5), + A6XX_PROTECT_RDONLY(0x03cc6, 0x0039), + A6XX_PROTECT_NORDWR(0x03d00, 0x1fff), + A6XX_PROTECT_NORDWR(0x08600, 0x01ff), + A6XX_PROTECT_NORDWR(0x08e00, 0x00ff), + A6XX_PROTECT_RDONLY(0x08f00, 0x0000), + A6XX_PROTECT_NORDWR(0x08f01, 0x01be), + A6XX_PROTECT_NORDWR(0x09600, 0x01ff), + A6XX_PROTECT_RDONLY(0x0981a, 0x02e5), + A6XX_PROTECT_NORDWR(0x09e00, 0x01ff), + A6XX_PROTECT_NORDWR(0x0a600, 0x01ff), + A6XX_PROTECT_NORDWR(0x0a82e, 0x0000), + A6XX_PROTECT_NORDWR(0x0ae00, 0x0006), + A6XX_PROTECT_NORDWR(0x0ae08, 0x0006), + A6XX_PROTECT_NORDWR(0x0ae10, 0x00bf), + A6XX_PROTECT_RDONLY(0x0aed0, 0x002f), + A6XX_PROTECT_NORDWR(0x0af00, 0x027f), + A6XX_PROTECT_NORDWR(0x0b600, 0x1fff), + A6XX_PROTECT_NORDWR(0x0dc00, 0x1fff), + A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff), + A6XX_PROTECT_NORDWR(0x18400, 0x003f), + A6XX_PROTECT_RDONLY(0x18440, 0x013f), + A6XX_PROTECT_NORDWR(0x18580, 0x1fff), + A6XX_PROTECT_NORDWR(0x1b400, 0x1fff), + A6XX_PROTECT_NORDWR(0x1f400, 0x0477), + A6XX_PROTECT_RDONLY(0x1f878, 0x0507), + A6XX_PROTECT_NORDWR(0x1f930, 0x0329), + A6XX_PROTECT_NORDWR(0x1fd80, 0x1fff), + A6XX_PROTECT_NORDWR(0x27800, 0x007f), + A6XX_PROTECT_RDONLY(0x27880, 0x0385), + A6XX_PROTECT_NORDWR(0x27882, 0x000a), + A6XX_PROTECT_NORDWR(0x27c06, 0x0000), +}; + +DECLARE_ADRENO_PROTECT(x285_protect, 64); + +static const struct adreno_reglist_pipe a840_nonctxt_regs[] = { + { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) }, + { REG_A8XX_GRAS_DBG_ECO_CNTL, 0x00000800, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0x00200000, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_VIS_STREAM_CNTL, 0x10010000, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0x00000002, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_1, 0x00000003, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_2, 0x00000200, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_3, 0x00500000, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_4, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) }, + /* Disable Dead Draw Merge scheme on RB-HLSQ */ + { REG_A6XX_RB_RBP_CNTL, BIT(5), BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A7XX_RB_CCU_CNTL, 0x00000068, BIT(PIPE_BR) }, + /* Partially enable perf clear, Disable DINT to c/z be data forwarding */ + { REG_A7XX_RB_CCU_DBG_ECO_CNTL, 0x00002200, BIT(PIPE_BR) }, + { REG_A8XX_RB_GC_GMEM_PROTECT, 0x12000000, BIT(PIPE_BR) }, + { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0x00000007, BIT(PIPE_BR) }, + { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0x00004000, BIT(PIPE_BR) }, + { REG_A8XX_RBBM_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_SLICE_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_POWER_UP_RESET_SW_OVERRIDE, 0x70809060, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_POWER_UP_RESET_SW_BV_OVERRIDE, 0x30000000, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL, 0x00000030, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL2, 0x00000030, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, 0x0fffffff, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x22122212, BIT(PIPE_NONE) }, + { REG_A8XX_RBBM_CGC_P2S_CNTL, 
0x00000040, BIT(PIPE_NONE) }, + /* Disable mode_switch optimization in UMAS */ + { REG_A6XX_SP_CHICKEN_BITS, BIT(24) | BIT(26), BIT(PIPE_NONE) }, + /* Disable LPAC large-LM mode */ + { REG_A8XX_SP_SS_CHICKEN_BITS_0, BIT(3), BIT(PIPE_NONE) }, + /* Disable PS out of order retire */ + { REG_A7XX_SP_CHICKEN_BITS_2, 0x00c21800, BIT(PIPE_NONE) }, + { REG_A7XX_SP_CHICKEN_BITS_3, 0x00300000, BIT(PIPE_NONE) }, + /* Disable SP2TP info attribute */ + { REG_A8XX_SP_CHICKEN_BITS_4, 0x00000002, BIT(PIPE_NONE) }, + { REG_A6XX_SP_PERFCTR_SHADER_MASK, 0x0000003f, BIT(PIPE_NONE) }, + { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL, BIT(14), BIT(PIPE_NONE) }, + /* Ignore HLSQ shared constant feedback from SP */ + { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_1, BIT(17), BIT(PIPE_NONE) }, + /* Disable CS dead batch merge */ + { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_2, BIT(24), BIT(PIPE_NONE) }, + { REG_A8XX_SP_HLSQ_DBG_ECO_CNTL_3, BIT(7), BIT(PIPE_NONE) }, + { REG_A7XX_SP_HLSQ_TIMEOUT_THRESHOLD_DP, 0x00000080, BIT(PIPE_NONE) }, + { REG_A7XX_SP_READ_SEL, 0x0001ff00, BIT(PIPE_NONE) }, + { REG_A6XX_TPL1_DBG_ECO_CNTL, 0x10100000, BIT(PIPE_NONE) }, + /* BIT(26): Disable final clamp for bicubic filtering */ + { REG_A6XX_TPL1_DBG_ECO_CNTL1, 0x04000720, BIT(PIPE_NONE) }, + { REG_A6XX_UCHE_MODE_CNTL, 0x80080000, BIT(PIPE_NONE) }, + { REG_A8XX_UCHE_CCHE_MODE_CNTL, 0x00001000, BIT(PIPE_NONE) }, + { REG_A8XX_UCHE_CCHE_CACHE_WAYS, 0x00000800, BIT(PIPE_NONE) }, + { REG_A8XX_UCHE_GBIF_GX_CONFIG, 0x010240e0, BIT(PIPE_NONE) }, + { REG_A8XX_UCHE_VARB_IDLE_TIMEOUT, 0x00000020, BIT(PIPE_NONE) }, + { REG_A7XX_VFD_DBG_ECO_CNTL, 0x00008000, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BV_THRESHOLD, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BR_THRESHOLD, 0x00600060, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0x00200020, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_LP_REQ_CNT, 0x00000020, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { }, +}; + +static const u32 a840_protect_regs[] = { + A6XX_PROTECT_RDONLY(0x00008, 0x039b), + A6XX_PROTECT_RDONLY(0x003b4, 0x008b), + A6XX_PROTECT_NORDWR(0x00440, 0x001f), + A6XX_PROTECT_RDONLY(0x00580, 0x005f), + A6XX_PROTECT_NORDWR(0x005e0, 0x011f), + A6XX_PROTECT_RDONLY(0x0074a, 0x0005), + A6XX_PROTECT_RDONLY(0x00759, 0x001b), + A6XX_PROTECT_NORDWR(0x00775, 0x000a), + A6XX_PROTECT_RDONLY(0x00789, 0x0000), + A6XX_PROTECT_RDONLY(0x0078c, 0x0013), + A6XX_PROTECT_NORDWR(0x00800, 0x0029), + A6XX_PROTECT_NORDWR(0x00837, 0x00af), + A6XX_PROTECT_RDONLY(0x008e7, 0x00c9), + A6XX_PROTECT_NORDWR(0x008ec, 0x00c3), + A6XX_PROTECT_NORDWR(0x009b1, 0x0250), + A6XX_PROTECT_NORDWR(0x00c07, 0x0008), + A6XX_PROTECT_RDONLY(0x00ce0, 0x0001), + A6XX_PROTECT_RDONLY(0x00df0, 0x0000), + A6XX_PROTECT_NORDWR(0x00df1, 0x0000), + A6XX_PROTECT_NORDWR(0x00e01, 0x0000), + A6XX_PROTECT_NORDWR(0x00e03, 0x1fff), + A6XX_PROTECT_NORDWR(0x03c00, 0x00c5), + A6XX_PROTECT_RDONLY(0x03cc6, 0x0039), + A6XX_PROTECT_NORDWR(0x03d00, 0x1fff), + A6XX_PROTECT_NORDWR(0x08600, 0x01ff), + A6XX_PROTECT_NORDWR(0x08e00, 0x00ff), + A6XX_PROTECT_RDONLY(0x08f00, 0x0000), + A6XX_PROTECT_NORDWR(0x08f01, 0x01be), + A6XX_PROTECT_NORDWR(0x09600, 0x01ff), + A6XX_PROTECT_RDONLY(0x0981a, 0x02e5), + A6XX_PROTECT_NORDWR(0x09e00, 0x01ff), + A6XX_PROTECT_NORDWR(0x0a600, 0x01ff), + A6XX_PROTECT_NORDWR(0x0a82e, 0x0000), + A6XX_PROTECT_NORDWR(0x0ae00, 0x0000), + A6XX_PROTECT_NORDWR(0x0ae02, 0x0004), + A6XX_PROTECT_NORDWR(0x0ae08, 0x0006), + A6XX_PROTECT_NORDWR(0x0ae10, 0x00bf), + 
A6XX_PROTECT_RDONLY(0x0aed0, 0x002f), + A6XX_PROTECT_NORDWR(0x0af00, 0x027f), + A6XX_PROTECT_NORDWR(0x0b600, 0x1fff), + A6XX_PROTECT_NORDWR(0x0dc00, 0x1fff), + A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff), + A6XX_PROTECT_NORDWR(0x18400, 0x003f), + A6XX_PROTECT_RDONLY(0x18440, 0x013f), + A6XX_PROTECT_NORDWR(0x18580, 0x1fff), + A6XX_PROTECT_NORDWR(0x1b400, 0x1fff), + A6XX_PROTECT_NORDWR(0x1f400, 0x0477), + A6XX_PROTECT_RDONLY(0x1f878, 0x0507), + A6XX_PROTECT_NORDWR(0x1f930, 0x0329), + A6XX_PROTECT_NORDWR(0x1fd80, 0x1fff), + A6XX_PROTECT_NORDWR(0x27800, 0x007f), + A6XX_PROTECT_RDONLY(0x27880, 0x0385), + A6XX_PROTECT_NORDWR(0x27882, 0x0009), + A6XX_PROTECT_NORDWR(0x27c06, 0x0000), +}; +DECLARE_ADRENO_PROTECT(a840_protect, 15); + +static const struct adreno_reglist a840_gbif[] = { + { REG_A6XX_GBIF_QSB_SIDE0, 0x00071e20 }, + { REG_A6XX_GBIF_QSB_SIDE1, 0x00071e20 }, + { REG_A6XX_GBIF_QSB_SIDE2, 0x00071e20 }, + { REG_A6XX_GBIF_QSB_SIDE3, 0x00071e20 }, + { REG_A8XX_GBIF_CX_CONFIG, 0x20023000 }, + { }, +}; + +static const struct adreno_info a8xx_gpus[] = { + { + .chip_ids = ADRENO_CHIP_IDS(0x44070001), + .family = ADRENO_8XX_GEN2, + .fw = { + [ADRENO_FW_SQE] = "gen80100_sqe.fw", + [ADRENO_FW_GMU] = "gen80100_gmu.bin", + }, + .gmem = 21 * SZ_1M, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_HAS_HW_APRIV, + .funcs = &a8xx_gpu_funcs, + .a6xx = &(const struct a6xx_info) { + .protect = &x285_protect, + .nonctxt_reglist = x285_nonctxt_regs, + .gbif_cx = a840_gbif, + .max_slices = 4, + .gmu_chipid = 0x8010100, + .bcms = (const struct a6xx_bcm[]) { + { .name = "SH0", .buswidth = 16 }, + { .name = "MC0", .buswidth = 4 }, + { + .name = "ACV", + .fixed = true, + .perfmode = BIT(2), + .perfmode_bw = 16500000, + }, + { /* sentinel */ }, + }, + }, + }, { + .chip_ids = ADRENO_CHIP_IDS(0x44050a01), + .family = ADRENO_8XX_GEN2, + .fw = { + [ADRENO_FW_SQE] = "gen80200_sqe.fw", + [ADRENO_FW_GMU] = "gen80200_gmu.bin", + [ADRENO_FW_AQE] = "gen80200_aqe.fw", + }, + .gmem = 18 * SZ_1M, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_HAS_HW_APRIV, + .funcs = &a8xx_gpu_funcs, + .a6xx = &(const struct a6xx_info) { + .protect = &a840_protect, + .nonctxt_reglist = a840_nonctxt_regs, + .gbif_cx = a840_gbif, + .max_slices = 3, + .gmu_chipid = 0x8020100, + .bcms = (const struct a6xx_bcm[]) { + { .name = "SH0", .buswidth = 16 }, + { .name = "MC0", .buswidth = 4 }, + { + .name = "ACV", + .fixed = true, + .perfmode = BIT(2), + .perfmode_bw = 10687500, + }, + { /* sentinel */ }, + }, + }, + .preempt_record_size = 19708 * SZ_1K, + } +}; + +DECLARE_ADRENO_GPULIST(a8xx); + static inline __always_unused void __build_asserts(void) { BUILD_BUG_ON(a630_protect.count > a630_protect.count_max); @@ -1619,4 +1957,5 @@ static inline __always_unused void __build_asserts(void) BUILD_BUG_ON(a660_protect.count > a660_protect.count_max); BUILD_BUG_ON(a690_protect.count > a690_protect.count_max); BUILD_BUG_ON(a730_protect.count > a730_protect.count_max); + BUILD_BUG_ON(a840_protect.count > a840_protect.count_max); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 4e6dc16e4a4c..5903cd891b49 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -224,14 +224,19 @@ unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu) static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) { - u32 val; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct 
a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; int local = gmu->idle_level; + u32 val; /* SPTP and IFPC both report as IFPC */ if (gmu->idle_level == GMU_IDLE_STATE_SPTP) local = GMU_IDLE_STATE_IFPC; - val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + if (adreno_is_a8xx(adreno_gpu)) + val = gmu_read(gmu, REG_A8XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + else + val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); if (val == local) { if (gmu->idle_level != GMU_IDLE_STATE_IFPC || @@ -269,7 +274,9 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) /* Set the log wptr index * note: downstream saves the value in poweroff and restores it here */ - if (adreno_is_a7xx(adreno_gpu)) + if (adreno_is_a8xx(adreno_gpu)) + gmu_write(gmu, REG_A8XX_GMU_GENERAL_9, 0); + else if (adreno_is_a7xx(adreno_gpu)) gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0); else gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0); @@ -350,12 +357,18 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = { /* Trigger an OOB (out of band) request to the GMU */ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; int ret; u32 val; int request, ack; WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); + /* Skip OOB calls since RGMU is not enabled */ + if (adreno_has_rgmu(adreno_gpu)) + return 0; + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return -EINVAL; @@ -376,9 +389,23 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) /* Trigger the requested OOB operation */ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); - /* Wait for the acknowledge interrupt */ - ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, - val & (1 << ack), 100, 10000); + do { + /* Wait for the acknowledge interrupt */ + ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, + val & (1 << ack), 100, 10000); + + if (!ret) + break; + + if (completion_done(&a6xx_gpu->base.fault_coredump_done)) + break; + + /* We may timeout because the GMU is temporarily wedged from + * pending faults from the GPU and we are taking a devcoredump. + * Wait until the MMU is resumed and try again. + */ + wait_for_completion(&a6xx_gpu->base.fault_coredump_done); + } while (true); if (ret) DRM_DEV_ERROR(gmu->dev, @@ -395,10 +422,16 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) /* Clear a pending OOB state in the GMU */ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; int bit; WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); + /* Skip OOB calls since RGMU is not enabled */ + if (adreno_has_rgmu(adreno_gpu)) + return; + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return; @@ -485,17 +518,25 @@ static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu) * in the power down sequence not being fully executed. That in turn can * prevent CX_GDSC from collapsing. Assert Qactive to avoid this. 
*/ - if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu)) - gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0)); + if (adreno_is_a8xx(adreno_gpu)) + gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0)); + else if (adreno_is_a7xx(adreno_gpu) || (adreno_is_a621(adreno_gpu) || + adreno_is_7c3(adreno_gpu))) + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0)); } /* Let the GMU know that we are about to go into slumber */ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; int ret; /* Disable the power counter so the GMU isn't busy */ - gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); + if (adreno_is_a8xx(adreno_gpu)) + gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); + else + gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); /* Disable SPTP_PC if the CPU is responsible for it */ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) @@ -522,10 +563,9 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) } out: - a6xx_gemnoc_workaround(gmu); - /* Put fence into allow mode */ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); + a6xx_gemnoc_workaround(gmu); return ret; } @@ -561,16 +601,22 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu) static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + u32 bitmask = BIT(16); int ret; u32 val; if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status)) return; + if (adreno_is_a840(adreno_gpu)) + bitmask = BIT(30); + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, - val, val & (1 << 16), 100, 10000); + val, val & bitmask, 100, 10000); if (ret) DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); @@ -584,22 +630,24 @@ static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) writel(value, ptr + (offset << 2)); } -static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, - const char *name); - static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) { struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct platform_device *pdev = to_platform_device(gmu->dev); - void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0; void __iomem *seqptr = NULL; uint32_t pdc_address_offset; + void __iomem *pdcptr; bool pdc_in_aop = false; + /* On A8x and above, RPMH/PDC configurations are entirely configured in AOP */ + if (adreno_is_a8xx(adreno_gpu)) + return; + + pdcptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc"); if (IS_ERR(pdcptr)) - goto err; + return; if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) @@ -612,9 +660,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) pdc_address_offset = 0x30080; if (!pdc_in_aop) { - seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); + seqptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc_seq"); if (IS_ERR(seqptr)) - goto err; + return; } /* Disable SDE clock gating */ @@ -704,12 +752,6 @@ setup_pdc: /* ensure no writes happen before the uCode is fully written */ wmb(); - -err: - if (!IS_ERR_OR_NULL(pdcptr)) - iounmap(pdcptr); - if (!IS_ERR_OR_NULL(seqptr)) - iounmap(seqptr); } /* @@ -732,7 +774,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) 
gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); /* A7xx knows better by default! */ - if (adreno_is_a7xx(adreno_gpu)) + if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) return; gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); @@ -795,7 +837,9 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) u32 itcm_base = 0x00000000; u32 dtcm_base = 0x00040000; - if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) + if (adreno_is_a650_family(adreno_gpu) || + adreno_is_a7xx(adreno_gpu) || + adreno_is_a8xx(adreno_gpu)) dtcm_base = 0x10004000; if (gmu->legacy) { @@ -850,7 +894,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) { struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx; + const struct adreno_reglist *gbif_cx = a6xx_info->gbif_cx; u32 fence_range_lower, fence_range_upper; u32 chipid = 0; int ret; @@ -859,12 +905,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) { gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); + } else if (adreno_is_a8xx(adreno_gpu)) { + gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); + gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); } /* Turn on TCM (Tightly Coupled Memory) retention */ if (adreno_is_a7xx(adreno_gpu)) a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1); - else + else if (!adreno_is_a8xx(adreno_gpu)) gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); ret = a6xx_rpmh_start(gmu); @@ -889,7 +938,10 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); - if (adreno_is_a7xx(adreno_gpu)) { + if (adreno_is_a8xx(adreno_gpu)) { + fence_range_upper = 0x32; + fence_range_lower = 0x8c0; + } else if (adreno_is_a7xx(adreno_gpu)) { fence_range_upper = 0x32; fence_range_lower = 0x8a0; } else { @@ -923,7 +975,12 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */ } - if (adreno_is_a7xx(adreno_gpu)) { + if (adreno_is_a8xx(adreno_gpu)) { + gmu_write(gmu, REG_A8XX_GMU_GENERAL_10, chipid); + gmu_write(gmu, REG_A8XX_GMU_GENERAL_8, + (gmu->log.iova & GENMASK(31, 12)) | + ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); + } else if (adreno_is_a7xx(adreno_gpu)) { gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid); gmu_write(gmu, REG_A7XX_GMU_GENERAL_8, (gmu->log.iova & GENMASK(31, 12)) | @@ -935,6 +992,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) gmu->log.iova | (gmu->log.size / SZ_4K - 1)); } + /* For A7x and newer, do the CX GBIF configurations before GMU wake up */ + for (int i = 0; (gbif_cx && gbif_cx[i].offset); i++) + gpu_write(gpu, gbif_cx[i].offset, gbif_cx[i].value); + + if (adreno_is_a8xx(adreno_gpu)) { + gpu_write(gpu, REG_A8XX_GBIF_CX_CONFIG, 0x20023000); + gmu_write(gmu, REG_A6XX_GMU_MRC_GBIF_QOS_CTRL, 0x33); + } + /* Set up the lowest idle level on the GMU */ a6xx_gmu_power_config(gmu); @@ -986,7 +1052,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) u32 val, seqmem_off = 0; /* The second spin of A7xx GPUs messed with some register offsets.. 
*/ - if (adreno_is_a740_family(adreno_gpu)) + if (adreno_is_a740_family(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) seqmem_off = 4; /* Make sure there are no outstanding RPMh votes */ @@ -999,7 +1065,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off, val, (val & 1), 100, 1000); - if (!adreno_is_a740_family(adreno_gpu)) + if (!adreno_is_a740_family(adreno_gpu) && !adreno_is_a8xx(adreno_gpu)) return; gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off, @@ -1027,7 +1093,10 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) * Turn off keep alive that might have been enabled by the hang * interrupt */ - gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); + if (adreno_is_a8xx(adreno_gpu)) + gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); + else + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); /* Flush all the queues */ a6xx_hfi_stop(gmu); @@ -1053,7 +1122,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) /* Halt the gmu cm3 core */ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); - a6xx_bus_clear_pending_transactions(adreno_gpu, true); + adreno_gpu->funcs->bus_halt(adreno_gpu, true); /* Reset GPU core blocks */ a6xx_gpu_sw_reset(gpu, true); @@ -1122,6 +1191,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) return ret; } + /* Read the slice info on A8x GPUs */ + a8xx_gpu_get_slice_info(gpu); + /* Set the bus quota to a reasonable value for boot */ a6xx_gmu_set_initial_bw(gpu, gmu); @@ -1131,7 +1203,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) enable_irq(gmu->gmu_irq); /* Check to see if we are doing a cold or warm boot */ - if (adreno_is_a7xx(adreno_gpu)) { + if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) { status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ? GMU_WARM_BOOT : GMU_COLD_BOOT; } else if (gmu->legacy) { @@ -1225,7 +1297,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) if (ret) goto force_off; - a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); + adreno_gpu->funcs->bus_halt(adreno_gpu, a6xx_gpu->hung); /* tell the GMU we want to slumber */ ret = a6xx_gmu_notify_slumber(gmu); @@ -1460,7 +1532,7 @@ static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu, vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK); /* GMUs on A7xx votes on both x & y */ - if (adreno_is_a7xx(adreno_gpu)) + if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote); else data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote); @@ -1492,13 +1564,14 @@ static unsigned int a6xx_gmu_get_arc_level(struct device *dev, } static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, - unsigned long *freqs, int freqs_count, const char *id) + unsigned long *freqs, int freqs_count, + const char *pri_id, const char *sec_id) { int i, j; const u16 *pri, *sec; size_t pri_count, sec_count; - pri = cmd_db_read_aux_data(id, &pri_count); + pri = cmd_db_read_aux_data(pri_id, &pri_count); if (IS_ERR(pri)) return PTR_ERR(pri); /* @@ -1509,13 +1582,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, if (!pri_count) return -EINVAL; - /* - * Some targets have a separate gfx mxc rail. 
So try to read that first and then fall back - * to regular mx rail if it is missing - */ - sec = cmd_db_read_aux_data("gmxc.lvl", &sec_count); - if (IS_ERR(sec) && sec != ERR_PTR(-EPROBE_DEFER)) - sec = cmd_db_read_aux_data("mx.lvl", &sec_count); + sec = cmd_db_read_aux_data(sec_id, &sec_count); if (IS_ERR(sec)) return PTR_ERR(sec); @@ -1569,6 +1636,57 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, return 0; } +static int a6xx_gmu_rpmh_dep_votes_init(struct device *dev, u32 *votes, + unsigned long *freqs, int freqs_count) +{ + const u16 *mx; + size_t count; + + mx = cmd_db_read_aux_data("mx.lvl", &count); + if (IS_ERR(mx)) + return PTR_ERR(mx); + /* + * The data comes back as an array of unsigned shorts so adjust the + * count accordingly + */ + count >>= 1; + if (!count) + return -EINVAL; + + /* Fix the vote for zero frequency */ + votes[0] = 0xffffffff; + + /* Construct a vote for rest of the corners */ + for (int i = 1; i < freqs_count; i++) { + unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]); + u8 j, index = 0; + + /* Get the primary index that matches the arc level */ + for (j = 0; j < count; j++) { + if (mx[j] >= level) { + index = j; + break; + } + } + + if (j == count) { + DRM_DEV_ERROR(dev, + "Mx Level %u not found in the RPMh list\n", + level); + DRM_DEV_ERROR(dev, "Available levels:\n"); + for (j = 0; j < count; j++) + DRM_DEV_ERROR(dev, " %u\n", mx[j]); + + return -EINVAL; + } + + /* Construct the vote */ + votes[i] = (0x3fff << 14) | (index << 8) | (0xff); + } + + return 0; +} + /* * The GMU votes with the RPMh for itself and on behalf of the GPU but we need * to construct the list of votes on the CPU and send it over. Query the RPMh @@ -1583,15 +1701,27 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; const struct a6xx_info *info = adreno_gpu->info->a6xx; struct msm_gpu *gpu = &adreno_gpu->base; + const char *sec_id; + const u16 *gmxc; int ret; + gmxc = cmd_db_read_aux_data("gmxc.lvl", NULL); + if (gmxc == ERR_PTR(-EPROBE_DEFER)) + return -EPROBE_DEFER; + + /* If GMxC is present, prefer that as secondary rail for GX votes */ + sec_id = IS_ERR_OR_NULL(gmxc) ? 
"mx.lvl" : "gmxc.lvl"; + /* Build the GX votes */ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, - gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); + gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl", sec_id); /* Build the CX votes */ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, - gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); + gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl", "mx.lvl"); + + ret |= a6xx_gmu_rpmh_dep_votes_init(gmu->dev, gmu->dep_arc_votes, + gmu->gpu_freqs, gmu->nr_gpu_freqs); /* Build the interconnect votes */ if (info->bcms && gmu->nr_gpu_bws > 1) @@ -1795,27 +1925,6 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) return 0; } -static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, - const char *name) -{ - void __iomem *ret; - struct resource *res = platform_get_resource_byname(pdev, - IORESOURCE_MEM, name); - - if (!res) { - DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); - return ERR_PTR(-EINVAL); - } - - ret = ioremap(res->start, resource_size(res)); - if (!ret) { - DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); - return ERR_PTR(-EINVAL); - } - - return ret; -} - static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, const char *name, irq_handler_t handler) { @@ -1866,7 +1975,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; - struct platform_device *pdev = to_platform_device(gmu->dev); mutex_lock(&gmu->lock); if (!gmu->initialized) { @@ -1895,12 +2003,11 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) qmp_put(gmu->qmp); iounmap(gmu->mmio); - if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) - iounmap(gmu->rscc); gmu->mmio = NULL; gmu->rscc = NULL; - if (!adreno_has_gmu_wrapper(adreno_gpu)) { + if (!adreno_has_gmu_wrapper(adreno_gpu) && + !adreno_has_rgmu(adreno_gpu)) { a6xx_gmu_memory_free(gmu); free_irq(gmu->gmu_irq, gmu); @@ -1922,10 +2029,38 @@ static int cxpd_notifier_cb(struct notifier_block *nb, return 0; } +static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, + const char *name, resource_size_t *start) +{ + void __iomem *ret; + struct resource *res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, name); + + if (!res) { + DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); + return ERR_PTR(-EINVAL); + } + + ret = ioremap(res->start, resource_size(res)); + if (!ret) { + DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); + return ERR_PTR(-EINVAL); + } + + if (start) + *start = res->start; + + return ret; +} + int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) { struct platform_device *pdev = of_find_device_by_node(node); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + resource_size_t start; + struct resource *res; int ret; if (!pdev) @@ -1942,13 +2077,29 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) /* Mark legacy for manual SPTPRAC control */ gmu->legacy = true; + /* RGMU requires clocks */ + ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); + if (ret < 0) + goto err_clk; + + gmu->nr_clocks = ret; + /* Map the GMU registers */ - gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); + gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start); if (IS_ERR(gmu->mmio)) { ret = PTR_ERR(gmu->mmio); goto err_mmio; } + res = 
platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory"); + if (!res) { + ret = -EINVAL; + goto err_mmio; + } + + /* Identify gmu base offset from gpu base address */ + gmu->mmio_offset = (u32)(start - res->start); + gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); if (IS_ERR(gmu->cxpd)) { ret = PTR_ERR(gmu->cxpd); @@ -1981,6 +2132,7 @@ detach_cxpd: err_mmio: iounmap(gmu->mmio); +err_clk: /* Drop reference taken in of_find_device_by_node */ put_device(gmu->dev); @@ -1989,10 +2141,13 @@ err_mmio: int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) { + struct platform_device *pdev = of_find_device_by_node(node); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; - struct platform_device *pdev = of_find_device_by_node(node); struct device_link *link; + resource_size_t start; + struct resource *res; int ret; if (!pdev) @@ -2028,13 +2183,14 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) */ gmu->dummy.size = SZ_4K; if (adreno_is_a660_family(adreno_gpu) || - adreno_is_a7xx(adreno_gpu)) { + adreno_is_a7xx(adreno_gpu) || + adreno_is_a8xx(adreno_gpu)) { ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000, "debug"); if (ret) goto err_memory; - gmu->dummy.size = SZ_8K; + gmu->dummy.size = SZ_16K; } /* Allocate memory for the GMU dummy page */ @@ -2045,7 +2201,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) /* Note that a650 family also includes a660 family: */ if (adreno_is_a650_family(adreno_gpu) || - adreno_is_a7xx(adreno_gpu)) { + adreno_is_a7xx(adreno_gpu) || + adreno_is_a8xx(adreno_gpu)) { ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, SZ_16M - SZ_16K, 0x04000, "icache"); if (ret) @@ -2087,19 +2244,30 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) goto err_memory; /* Map the GMU registers */ - gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); + gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start); if (IS_ERR(gmu->mmio)) { ret = PTR_ERR(gmu->mmio); goto err_memory; } + res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory"); + if (!res) { + ret = -EINVAL; + goto err_mmio; + } + + /* Identify gmu base offset from gpu base address */ + gmu->mmio_offset = (u32)(start - res->start); + if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) { - gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); + gmu->rscc = devm_platform_ioremap_resource_byname(pdev, "rscc"); if (IS_ERR(gmu->rscc)) { ret = -ENODEV; goto err_mmio; } + } else if (adreno_is_a8xx(adreno_gpu)) { + gmu->rscc = gmu->mmio + 0x19000; } else { gmu->rscc = gmu->mmio + 0x23000; } @@ -2173,8 +2341,6 @@ detach_cxpd: err_mmio: iounmap(gmu->mmio); - if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) - iounmap(gmu->rscc); free_irq(gmu->gmu_irq, gmu); free_irq(gmu->hfi_irq, gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 06cfc294016f..2af074c8e8cf 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -19,8 +19,8 @@ struct a6xx_gmu_bo { u64 iova; }; -#define GMU_MAX_GX_FREQS 16 -#define GMU_MAX_CX_FREQS 4 +#define GMU_MAX_GX_FREQS 32 +#define GMU_MAX_CX_FREQS 6 #define GMU_MAX_BCMS 3 struct a6xx_bcm { @@ -68,6 +68,7 @@ struct a6xx_gmu { struct drm_gpuvm *vm; void __iomem *mmio; + u32 mmio_offset; void __iomem *rscc; int hfi_irq; @@ -96,6 +97,7 @@ struct a6xx_gmu { int nr_gpu_freqs; unsigned 
long gpu_freqs[GMU_MAX_GX_FREQS]; u32 gx_arc_votes[GMU_MAX_GX_FREQS]; + u32 dep_arc_votes[GMU_MAX_GX_FREQS]; struct a6xx_hfi_acd_table acd_table; int nr_gpu_bws; @@ -130,20 +132,23 @@ struct a6xx_gmu { unsigned long status; }; +#define GMU_BYTE_OFFSET(gmu, offset) (((offset) << 2) - (gmu)->mmio_offset) + static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) { - return readl(gmu->mmio + (offset << 2)); + /* The 'offset' is based on GPU's start address. Adjust it */ + return readl(gmu->mmio + GMU_BYTE_OFFSET(gmu, offset)); } static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value) { - writel(value, gmu->mmio + (offset << 2)); + writel(value, gmu->mmio + GMU_BYTE_OFFSET(gmu, offset)); } static inline void gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size) { - memcpy_toio(gmu->mmio + (offset << 2), data, size); + memcpy_toio(gmu->mmio + GMU_BYTE_OFFSET(gmu, offset), data, size); wmb(); } @@ -160,17 +165,17 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi) { u64 val; - val = (u64) readl(gmu->mmio + (lo << 2)); - val |= ((u64) readl(gmu->mmio + (hi << 2)) << 32); + val = gmu_read(gmu, lo); + val |= ((u64) gmu_read(gmu, hi) << 32); return val; } #define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \ - readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \ - interval, timeout) + readl_poll_timeout((gmu)->mmio + (GMU_BYTE_OFFSET(gmu, addr)), val, \ + cond, interval, timeout) #define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \ - readl_poll_timeout_atomic((gmu)->mmio + ((addr) << 2), val, cond, \ + readl_poll_timeout_atomic((gmu)->mmio + (GMU_BYTE_OFFSET(gmu, addr)), val, cond, \ interval, timeout) static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index b8f8ae940b55..0200a7e71cdf 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -157,7 +157,7 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) } } -static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); @@ -224,7 +224,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, OUT_RING(ring, submit->seqno - 1); OUT_PKT7(ring, CP_THREAD_CONTROL, 1); - OUT_RING(ring, CP_SET_THREAD_BOTH); + OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BOTH); /* Reset state used to synchronize BR and BV */ OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1); @@ -235,18 +235,31 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS); OUT_PKT7(ring, CP_THREAD_CONTROL, 1); - OUT_RING(ring, CP_SET_THREAD_BR); + OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BOTH); + + OUT_PKT7(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, LRZ_FLUSH_INVALIDATE); + + OUT_PKT7(ring, CP_THREAD_CONTROL, 1); + OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR); } if (!sysprof) { - if (!adreno_is_a7xx(adreno_gpu)) { + if (!(adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))) { /* Turn off protected mode to write to special registers */ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 0); } - OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1); - OUT_RING(ring, 1); + if (adreno_is_a8xx(adreno_gpu)) { + 
OUT_PKT4(ring, REG_A8XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1); + OUT_RING(ring, 1); + OUT_PKT4(ring, REG_A8XX_RBBM_SLICE_PERFCTR_SRAM_INIT_CMD, 1); + OUT_RING(ring, 1); + } else { + OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1); + OUT_RING(ring, 1); + } } /* Execute the table update */ @@ -275,7 +288,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, * to make sure BV doesn't race ahead while BR is still switching * pagetables. */ - if (adreno_is_a7xx(&a6xx_gpu->base)) { + if (adreno_is_a7xx(&a6xx_gpu->base) || adreno_is_a8xx(&a6xx_gpu->base)) { OUT_PKT7(ring, CP_THREAD_CONTROL, 1); OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR); } @@ -289,20 +302,22 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, OUT_RING(ring, CACHE_INVALIDATE); if (!sysprof) { + u32 reg_status = adreno_is_a8xx(adreno_gpu) ? + REG_A8XX_RBBM_PERFCTR_SRAM_INIT_STATUS : + REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS; /* * Wait for SRAM clear after the pgtable update, so the * two can happen in parallel: */ OUT_PKT7(ring, CP_WAIT_REG_MEM, 6); OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ)); - OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO( - REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS)); + OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO(reg_status)); OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_HI(0)); OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1)); OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1)); OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0)); - if (!adreno_is_a7xx(adreno_gpu)) { + if (!(adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))) { /* Re-enable protected mode: */ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1); @@ -375,7 +390,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) rbmemptr_stats(ring, index, alwayson_end)); /* Write the fence to the scratch register */ - OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1); + OUT_PKT4(ring, REG_A6XX_CP_SCRATCH(2), 1); OUT_RING(ring, submit->seqno); /* @@ -440,6 +455,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = submit->ring; + u32 rbbm_perfctr_cp0, cp_always_on_counter; unsigned int i, ibs = 0; adreno_check_and_reenable_stall(adreno_gpu); @@ -460,10 +476,16 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) if (gpu->nr_rings > 1) a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue); - get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), - rbmemptr_stats(ring, index, cpcycles_start)); - get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, - rbmemptr_stats(ring, index, alwayson_start)); + if (adreno_is_a8xx(adreno_gpu)) { + rbbm_perfctr_cp0 = REG_A8XX_RBBM_PERFCTR_CP(0); + cp_always_on_counter = REG_A8XX_CP_ALWAYS_ON_COUNTER; + } else { + rbbm_perfctr_cp0 = REG_A7XX_RBBM_PERFCTR_CP(0); + cp_always_on_counter = REG_A6XX_CP_ALWAYS_ON_COUNTER; + } + + get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_start)); + get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_start)); OUT_PKT7(ring, CP_THREAD_CONTROL, 1); OUT_RING(ring, CP_SET_THREAD_BOTH); @@ -510,14 +532,17 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, 0x00e); /* IB1LIST end */ } - get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), - rbmemptr_stats(ring, index, cpcycles_end)); - get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, - 
rbmemptr_stats(ring, index, alwayson_end)); + get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_end)); + get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_end)); /* Write the fence to the scratch register */ - OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1); - OUT_RING(ring, submit->seqno); + if (adreno_is_a8xx(adreno_gpu)) { + OUT_PKT4(ring, REG_A8XX_CP_SCRATCH_GLOBAL(2), 1); + OUT_RING(ring, submit->seqno); + } else { + OUT_PKT4(ring, REG_A6XX_CP_SCRATCH(2), 1); + OUT_RING(ring, submit->seqno); + } OUT_PKT7(ring, CP_THREAD_CONTROL, 1); OUT_RING(ring, CP_SET_THREAD_BR); @@ -612,15 +637,26 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) if (adreno_is_a630(adreno_gpu)) clock_cntl_on = 0x8aa8aa02; - else if (adreno_is_a610(adreno_gpu)) + else if (adreno_is_a610(adreno_gpu) || adreno_is_a612(adreno_gpu)) clock_cntl_on = 0xaaa8aa82; else if (adreno_is_a702(adreno_gpu)) clock_cntl_on = 0xaaaaaa82; else clock_cntl_on = 0x8aa8aa82; - cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111; - cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555; + if (adreno_is_a612(adreno_gpu)) + cgc_delay = 0x11; + else if (adreno_is_a615_family(adreno_gpu)) + cgc_delay = 0x111; + else + cgc_delay = 0x10111; + + if (adreno_is_a612(adreno_gpu)) + cgc_hyst = 0x55; + else if (adreno_is_a615_family(adreno_gpu)) + cgc_hyst = 0x555; + else + cgc_hyst = 0x5555; gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0); @@ -706,14 +742,20 @@ static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu) /* Copy the data into the internal struct to drop the const qualifier (temporarily) */ *cfg = *common_cfg; - cfg->ubwc_swizzle = 0x6; - cfg->highest_bank_bit = 15; + /* Use common config as is for A8x */ + if (!adreno_is_a8xx(gpu)) { + cfg->ubwc_swizzle = 0x6; + cfg->highest_bank_bit = 15; + } if (adreno_is_a610(gpu)) { cfg->highest_bank_bit = 13; cfg->ubwc_swizzle = 0x7; } + if (adreno_is_a612(gpu)) + cfg->highest_bank_bit = 14; + if (adreno_is_a618(gpu)) cfg->highest_bank_bit = 14; @@ -993,7 +1035,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, return false; /* A7xx is safe! 
*/ - if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu)) + if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) return true; /* @@ -1076,6 +1118,23 @@ static int a6xx_ucode_load(struct msm_gpu *gpu) } } + if (!a6xx_gpu->aqe_bo && adreno_gpu->fw[ADRENO_FW_AQE]) { + a6xx_gpu->aqe_bo = adreno_fw_create_bo(gpu, + adreno_gpu->fw[ADRENO_FW_AQE], &a6xx_gpu->aqe_iova); + + if (IS_ERR(a6xx_gpu->aqe_bo)) { + int ret = PTR_ERR(a6xx_gpu->aqe_bo); + + a6xx_gpu->aqe_bo = NULL; + DRM_DEV_ERROR(&gpu->pdev->dev, + "Could not allocate AQE ucode: %d\n", ret); + + return ret; + } + + msm_gem_object_set_name(a6xx_gpu->aqe_bo, "aqefw"); + } + /* * Expanded APRIV and targets that support WHERE_AM_I both need a * privileged buffer to store the RPTR shadow @@ -1107,7 +1166,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu) return 0; } -static int a6xx_zap_shader_init(struct msm_gpu *gpu) +int a6xx_zap_shader_init(struct msm_gpu *gpu) { static bool loaded; int ret; @@ -1220,17 +1279,20 @@ static int hw_init(struct msm_gpu *gpu) /* enable hardware clockgating */ a6xx_set_hwcg(gpu, true); - /* VBIF/GBIF start*/ - if (adreno_is_a610_family(adreno_gpu) || - adreno_is_a640_family(adreno_gpu) || - adreno_is_a650_family(adreno_gpu) || - adreno_is_a7xx(adreno_gpu)) { + /* For gmuwrapper implementations, do the VBIF/GBIF CX configuration here */ + if (adreno_is_a610_family(adreno_gpu)) { gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); - gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, - adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3); + } + + if (adreno_is_a610_family(adreno_gpu) || + adreno_is_a640_family(adreno_gpu) || + adreno_is_a650_family(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3); + } else if (adreno_is_a7xx(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x2120212); } else { gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); } @@ -1285,10 +1347,10 @@ static int hw_init(struct msm_gpu *gpu) } if (adreno_is_a660_family(adreno_gpu)) - gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); + gpu_write(gpu, REG_A7XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); /* Setting the mem pool size */ - if (adreno_is_a610(adreno_gpu)) { + if (adreno_is_a610(adreno_gpu) || adreno_is_a612(adreno_gpu)) { gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48); gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47); } else if (adreno_is_a702(adreno_gpu)) { @@ -1321,7 +1383,8 @@ static int hw_init(struct msm_gpu *gpu) a6xx_set_ubwc_config(gpu); /* Enable fault detection */ - if (adreno_is_a730(adreno_gpu) || + if (adreno_is_a612(adreno_gpu) || + adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff); else if (adreno_is_a690(adreno_gpu)) @@ -1540,7 +1603,7 @@ static void a6xx_recover(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct a6xx_gmu *gmu = &a6xx_gpu->gmu; - int i, active_submits; + int active_submits; adreno_dump_info(gpu); @@ -1548,10 +1611,6 @@ static void a6xx_recover(struct msm_gpu *gpu) /* Sometimes crashstate capture is skipped, so SQE should be halted here again */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); - for (i = 0; i < 8; i++) - DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, - gpu_read(gpu, 
REG_A6XX_CP_SCRATCH_REG(i))); - if (hang_debug) a6xx_dump(gpu); @@ -1576,9 +1635,9 @@ static void a6xx_recover(struct msm_gpu *gpu) */ gpu->active_submits = 0; - if (adreno_has_gmu_wrapper(adreno_gpu)) { + if (adreno_has_gmu_wrapper(adreno_gpu) || adreno_has_rgmu(adreno_gpu)) { /* Drain the outstanding traffic on memory buses */ - a6xx_bus_clear_pending_transactions(adreno_gpu, true); + adreno_gpu->funcs->bus_halt(adreno_gpu, true); /* Reset the GPU to a clean state */ a6xx_gpu_sw_reset(gpu, true); @@ -1737,10 +1796,10 @@ static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *da const char *block = "unknown"; u32 scratch[] = { - gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), - gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), - gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), - gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH(4)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH(5)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH(6)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH(7)), }; if (info) @@ -2072,7 +2131,7 @@ static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu) u32 fuse_val; int ret; - if (adreno_is_a750(adreno_gpu)) { + if (adreno_is_a750(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) { /* * Assume that if qcom scm isn't available, that whatever * replacement allows writing the fuse register ourselves. @@ -2098,9 +2157,9 @@ static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu) return ret; /* - * On a750 raytracing may be disabled by the firmware, find out - * whether that's the case. The scm call above sets the fuse - * register. + * On A7XX_GEN3 and newer, raytracing may be disabled by the + * firmware, find out whether that's the case. The scm call + * above sets the fuse register. */ fuse_val = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_SW_FUSE_VALUE); @@ -2161,7 +2220,7 @@ void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_ void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert) { /* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */ - if (adreno_is_a610(to_adreno_gpu(gpu))) + if (adreno_is_a610(to_adreno_gpu(gpu)) || adreno_is_a8xx(to_adreno_gpu(gpu))) return; gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert); @@ -2192,7 +2251,12 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu) msm_devfreq_resume(gpu); - adreno_is_a7xx(adreno_gpu) ? 
a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu); + if (adreno_is_a8xx(adreno_gpu)) + a8xx_llc_activate(a6xx_gpu); + else if (adreno_is_a7xx(adreno_gpu)) + a7xx_llc_activate(a6xx_gpu); + else + a6xx_llc_activate(a6xx_gpu); return ret; } @@ -2229,6 +2293,12 @@ static int a6xx_pm_resume(struct msm_gpu *gpu) if (ret) goto err_bulk_clk; + ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); + if (ret) { + clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); + goto err_bulk_clk; + } + if (adreno_is_a619_holi(adreno_gpu)) a6xx_sptprac_enable(gmu); @@ -2242,8 +2312,10 @@ err_bulk_clk: err_set_opp: mutex_unlock(&a6xx_gpu->gmu.lock); - if (!ret) + if (!ret) { msm_devfreq_resume(gpu); + a6xx_llc_activate(a6xx_gpu); + } return ret; } @@ -2284,17 +2356,20 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) trace_msm_gpu_suspend(0); + a6xx_llc_deactivate(a6xx_gpu); + msm_devfreq_suspend(gpu); mutex_lock(&a6xx_gpu->gmu.lock); /* Drain the outstanding traffic on memory buses */ - a6xx_bus_clear_pending_transactions(adreno_gpu, true); + adreno_gpu->funcs->bus_halt(adreno_gpu, true); if (adreno_is_a619_holi(adreno_gpu)) a6xx_sptprac_disable(gmu); clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); + clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); pm_runtime_put_sync(gmu->gxpd); dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); @@ -2345,6 +2420,11 @@ static void a6xx_destroy(struct msm_gpu *gpu) drm_gem_object_put(a6xx_gpu->sqe_bo); } + if (a6xx_gpu->aqe_bo) { + msm_gem_unpin_iova(a6xx_gpu->aqe_bo, gpu->vm); + drm_gem_object_put(a6xx_gpu->aqe_bo); + } + if (a6xx_gpu->shadow_bo) { msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->vm); drm_gem_object_put(a6xx_gpu->shadow_bo); @@ -2527,7 +2607,105 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i return 0; } -static const struct adreno_gpu_funcs funcs = { +static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct adreno_platform_config *config = pdev->dev.platform_data; + struct device_node *node; + struct a6xx_gpu *a6xx_gpu; + struct adreno_gpu *adreno_gpu; + struct msm_gpu *gpu; + extern int enable_preemption; + bool is_a7xx; + int ret, nr_rings = 1; + + a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL); + if (!a6xx_gpu) + return ERR_PTR(-ENOMEM); + + adreno_gpu = &a6xx_gpu->base; + gpu = &adreno_gpu->base; + + mutex_init(&a6xx_gpu->gmu.lock); + + adreno_gpu->registers = NULL; + + /* Check if there is a GMU phandle and set it up */ + node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); + /* FIXME: How do we gracefully handle this? */ + BUG_ON(!node); + + adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper"); + + adreno_gpu->base.hw_apriv = + !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV); + + /* gpu->info only gets assigned in adreno_gpu_init(). 
A8x is included intentionally */ + is_a7xx = config->info->family >= ADRENO_7XX_GEN1; + + a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx); + + ret = a6xx_set_supported_hw(&pdev->dev, config->info); + if (ret) { + a6xx_llc_slices_destroy(a6xx_gpu); + kfree(a6xx_gpu); + return ERR_PTR(ret); + } + + if ((enable_preemption == 1) || (enable_preemption == -1 && + (config->info->quirks & ADRENO_QUIRK_PREEMPTION))) + nr_rings = 4; + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + + /* + * For now only clamp to idle freq for devices where this is known not + * to cause power supply issues: + */ + if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu)) + priv->gpu_clamp_to_idle = true; + + if (adreno_has_gmu_wrapper(adreno_gpu) || adreno_has_rgmu(adreno_gpu)) + ret = a6xx_gmu_wrapper_init(a6xx_gpu, node); + else + ret = a6xx_gmu_init(a6xx_gpu, node); + of_node_put(node); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + + if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) { + ret = a7xx_cx_mem_init(a6xx_gpu); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + } + + adreno_gpu->uche_trap_base = 0x1fffffffff000ull; + + msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, + adreno_gpu->funcs->mmu_fault_handler); + + ret = a6xx_calc_ubwc_config(adreno_gpu); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + + /* Set up the preemption specific bits and pieces for each ringbuffer */ + a6xx_preempt_init(gpu); + + return gpu; +} + +const struct adreno_gpu_funcs a6xx_gpu_funcs = { .base = { .get_param = adreno_get_param, .set_param = adreno_set_param, @@ -2554,12 +2732,14 @@ static const struct adreno_gpu_funcs funcs = { .create_private_vm = a6xx_create_private_vm, .get_rptr = a6xx_get_rptr, .progress = a6xx_progress, - .sysprof_setup = a6xx_gmu_sysprof_setup, }, + .init = a6xx_gpu_init, .get_timestamp = a6xx_gmu_get_timestamp, + .bus_halt = a6xx_bus_clear_pending_transactions, + .mmu_fault_handler = a6xx_fault_handler, }; -static const struct adreno_gpu_funcs funcs_gmuwrapper = { +const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs = { .base = { .get_param = adreno_get_param, .set_param = adreno_set_param, @@ -2585,10 +2765,13 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = { .get_rptr = a6xx_get_rptr, .progress = a6xx_progress, }, + .init = a6xx_gpu_init, .get_timestamp = a6xx_get_timestamp, + .bus_halt = a6xx_bus_clear_pending_transactions, + .mmu_fault_handler = a6xx_fault_handler, }; -static const struct adreno_gpu_funcs funcs_a7xx = { +const struct adreno_gpu_funcs a7xx_gpu_funcs = { .base = { .get_param = adreno_get_param, .set_param = adreno_set_param, @@ -2615,111 +2798,36 @@ static const struct adreno_gpu_funcs funcs_a7xx = { .create_private_vm = a6xx_create_private_vm, .get_rptr = a6xx_get_rptr, .progress = a6xx_progress, - .sysprof_setup = a6xx_gmu_sysprof_setup, }, + .init = a6xx_gpu_init, .get_timestamp = a6xx_gmu_get_timestamp, + .bus_halt = a6xx_bus_clear_pending_transactions, + .mmu_fault_handler = a6xx_fault_handler, }; -struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) -{ - struct msm_drm_private *priv = dev->dev_private; - struct platform_device *pdev = priv->gpu_pdev; - struct adreno_platform_config *config = pdev->dev.platform_data; - struct device_node *node; - struct a6xx_gpu *a6xx_gpu; - struct adreno_gpu *adreno_gpu; - struct msm_gpu *gpu; - extern 
int enable_preemption; - bool is_a7xx; - int ret; - - a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL); - if (!a6xx_gpu) - return ERR_PTR(-ENOMEM); - - adreno_gpu = &a6xx_gpu->base; - gpu = &adreno_gpu->base; - - mutex_init(&a6xx_gpu->gmu.lock); - - adreno_gpu->registers = NULL; - - /* Check if there is a GMU phandle and set it up */ - node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); - /* FIXME: How do we gracefully handle this? */ - BUG_ON(!node); - - adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper"); - - adreno_gpu->base.hw_apriv = - !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV); - - /* gpu->info only gets assigned in adreno_gpu_init() */ - is_a7xx = config->info->family == ADRENO_7XX_GEN1 || - config->info->family == ADRENO_7XX_GEN2 || - config->info->family == ADRENO_7XX_GEN3; - - a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx); - - ret = a6xx_set_supported_hw(&pdev->dev, config->info); - if (ret) { - a6xx_llc_slices_destroy(a6xx_gpu); - kfree(a6xx_gpu); - return ERR_PTR(ret); - } - - if ((enable_preemption == 1) || (enable_preemption == -1 && - (config->info->quirks & ADRENO_QUIRK_PREEMPTION))) - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4); - else if (is_a7xx) - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1); - else if (adreno_has_gmu_wrapper(adreno_gpu)) - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1); - else - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); - if (ret) { - a6xx_destroy(&(a6xx_gpu->base.base)); - return ERR_PTR(ret); - } - - /* - * For now only clamp to idle freq for devices where this is known not - * to cause power supply issues: - */ - if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu)) - priv->gpu_clamp_to_idle = true; - - if (adreno_has_gmu_wrapper(adreno_gpu)) - ret = a6xx_gmu_wrapper_init(a6xx_gpu, node); - else - ret = a6xx_gmu_init(a6xx_gpu, node); - of_node_put(node); - if (ret) { - a6xx_destroy(&(a6xx_gpu->base.base)); - return ERR_PTR(ret); - } - - if (adreno_is_a7xx(adreno_gpu)) { - ret = a7xx_cx_mem_init(a6xx_gpu); - if (ret) { - a6xx_destroy(&(a6xx_gpu->base.base)); - return ERR_PTR(ret); - } - } - - adreno_gpu->uche_trap_base = 0x1fffffffff000ull; - - msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, - a6xx_fault_handler); - - ret = a6xx_calc_ubwc_config(adreno_gpu); - if (ret) { - a6xx_destroy(&(a6xx_gpu->base.base)); - return ERR_PTR(ret); - } - - /* Set up the preemption specific bits and pieces for each ringbuffer */ - a6xx_preempt_init(gpu); - - return gpu; -} +const struct adreno_gpu_funcs a8xx_gpu_funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a8xx_hw_init, + .ucode_load = a6xx_ucode_load, + .pm_suspend = a6xx_gmu_pm_suspend, + .pm_resume = a6xx_gmu_pm_resume, + .recover = a8xx_recover, + .submit = a7xx_submit, + .active_ring = a6xx_active_ring, + .irq = a8xx_irq, + .destroy = a6xx_destroy, + .gpu_busy = a8xx_gpu_busy, + .gpu_get_freq = a6xx_gmu_get_freq, + .gpu_set_freq = a6xx_gpu_set_freq, + .create_vm = a6xx_create_vm, + .create_private_vm = a6xx_create_private_vm, + .get_rptr = a6xx_get_rptr, + .progress = a8xx_progress, + }, + .init = a6xx_gpu_init, + .get_timestamp = a8xx_gmu_get_timestamp, + .bus_halt = a8xx_bus_clear_pending_transactions, + .mmu_fault_handler = a8xx_fault_handler, +}; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 0b17d36c36a9..6820216ec5fc 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h 
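The a6xx_gpu.c rework above replaces the three file-local funcs tables with exported per-family adreno_gpu_funcs tables (a6xx, GMU-wrapper, a7xx, a8xx) that now also carry init, get_timestamp, bus_halt and mmu_fault_handler hooks, so the probe path dispatches through the catalog entry instead of open-coding family checks. Below is a minimal, self-contained sketch of that dispatch-table pattern; the gpu_funcs, gpu_info and *_stub names are illustrative only, not the driver's actual types or API.

#include <stdbool.h>
#include <stdio.h>

struct gpu;                              /* opaque handle for the sketch */

/* Per-family operations table, in the spirit of struct adreno_gpu_funcs */
struct gpu_funcs {
	const char *name;
	int (*hw_init)(struct gpu *gpu);
	void (*bus_halt)(struct gpu *gpu, bool gx_off);
};

static int a6xx_hw_init_stub(struct gpu *gpu)
{
	puts("a6xx-style hw_init");
	return 0;
}

static void a6xx_bus_halt_stub(struct gpu *gpu, bool gx_off)
{
	printf("bus halt, gx_off=%d\n", gx_off);
}

static const struct gpu_funcs a6xx_funcs_stub = {
	.name     = "a6xx",
	.hw_init  = a6xx_hw_init_stub,
	.bus_halt = a6xx_bus_halt_stub,
};

/* Catalog entry points at the funcs table, so shared code never has to
 * switch on the GPU family at run time. */
struct gpu_info {
	unsigned int chip_id;
	const struct gpu_funcs *funcs;
};

static const struct gpu_info catalog[] = {
	{ .chip_id = 0x43050a01, .funcs = &a6xx_funcs_stub },
};

int main(void)
{
	const struct gpu_info *info = &catalog[0];

	/* Dispatch purely through the table, as the reworked probe path
	 * does via its catalog entry. */
	info->funcs->hw_init(NULL);
	info->funcs->bus_halt(NULL, true);
	return 0;
}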
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -46,6 +46,9 @@ struct a6xx_info { const struct adreno_protect *protect; const struct adreno_reglist_list *pwrup_reglist; const struct adreno_reglist_list *ifpc_reglist; + const struct adreno_reglist *gbif_cx; + const struct adreno_reglist_pipe *nonctxt_reglist; + u32 max_slices; u32 gmu_chipid; u32 gmu_cgc_mode; u32 prim_fifo_threshold; @@ -57,6 +60,8 @@ struct a6xx_gpu { struct drm_gem_object *sqe_bo; uint64_t sqe_iova; + struct drm_gem_object *aqe_bo; + uint64_t aqe_iova; struct msm_ringbuffer *cur_ring; struct msm_ringbuffer *next_ring; @@ -101,6 +106,11 @@ struct a6xx_gpu { void *htw_llc_slice; bool have_mmu500; bool hung; + + u32 cached_aperture; + spinlock_t aperture_lock; + + u32 slice_mask; }; #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) @@ -216,6 +226,11 @@ struct a7xx_cp_smmu_info { #define A6XX_PROTECT_RDONLY(_reg, _len) \ ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF)) +extern const struct adreno_gpu_funcs a6xx_gpu_funcs; +extern const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs; +extern const struct adreno_gpu_funcs a7xx_gpu_funcs; +extern const struct adreno_gpu_funcs a8xx_gpu_funcs; + static inline bool a6xx_has_gbif(struct adreno_gpu *gpu) { if(adreno_is_a630(gpu)) @@ -298,5 +313,19 @@ int a6xx_gpu_state_put(struct msm_gpu_state *state); void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off); void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert); int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b); - +void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring); +int a6xx_zap_shader_init(struct msm_gpu *gpu); + +void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off); +int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data); +void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring); +int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value); +u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate); +int a8xx_gpu_feature_probe(struct msm_gpu *gpu); +void a8xx_gpu_get_slice_info(struct msm_gpu *gpu); +int a8xx_hw_init(struct msm_gpu *gpu); +irqreturn_t a8xx_irq(struct msm_gpu *gpu); +void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu); +bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring); +void a8xx_recover(struct msm_gpu *gpu); #endif /* __A6XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 4c7f3c642f6a..d2d6b2fd3cba 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -1255,7 +1255,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, return; /* Set the fence to ALLOW mode so we can access the registers */ - gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2], &a6xx_state->gmu_registers[3], false); @@ -1596,7 +1596,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) /* Get the generic state from the adreno core */ adreno_gpu_state_get(gpu, &a6xx_state->base); - if (!adreno_has_gmu_wrapper(adreno_gpu)) { + if (!adreno_has_gmu_wrapper(adreno_gpu) && + !adreno_has_rgmu(adreno_gpu)) { a6xx_get_gmu_registers(gpu, a6xx_state); a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index 1c18499b60bb..b49d8427b59e 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -71,8 +71,8 @@ static const struct a6xx_cluster { u32 sel_val; } a6xx_clusters[] = { CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0), - CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0), - CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9), + CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0), + CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9), CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0), CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0), CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0), @@ -303,8 +303,8 @@ static const u32 a660_registers[] = { static const struct a6xx_registers a6xx_reglist[] = { REGS(a6xx_registers, 0, 0), REGS(a660_registers, 0, 0), - REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0), - REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9), + REGS(a6xx_rb_rac_registers, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0), + REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 9), }; static const u32 a6xx_ahb_registers[] = { @@ -343,48 +343,48 @@ static const struct a6xx_registers a6xx_gbif_reglist = static const u32 a6xx_gmu_gx_registers[] = { /* GMU GX */ - 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b, - 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b, - 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b, - 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084, - 0x0100, 0x012b, 0x0140, 0x0140, + 0x1a800, 0x1a800, 0x1a810, 0x1a813, 0x1a816, 0x1a816, 0x1a818, 0x1a81b, + 0x1a81e, 0x1a81e, 0x1a820, 0x1a823, 0x1a826, 0x1a826, 0x1a828, 0x1a82b, + 0x1a82e, 0x1a82e, 0x1a830, 0x1a833, 0x1a836, 0x1a836, 0x1a838, 0x1a83b, + 0x1a83e, 0x1a83e, 0x1a840, 0x1a843, 0x1a846, 0x1a846, 0x1a880, 0x1a884, + 0x1a900, 0x1a92b, 0x1a940, 0x1a940, }; static const u32 a6xx_gmu_cx_registers[] = { /* GMU CX */ - 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a, - 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c, - 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089, - 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0, - 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140, - 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154, - 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165, - 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc, - 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201, + 0x1f400, 0x1f407, 0x1f410, 0x1f412, 0x1f500, 0x1f500, 0x1f507, 0x1f50a, + 0x1f800, 0x1f804, 0x1f807, 0x1f808, 0x1f80b, 0x1f80c, 0x1f80f, 0x1f81c, + 0x1f824, 0x1f82a, 0x1f82d, 0x1f830, 0x1f840, 0x1f853, 0x1f887, 0x1f889, + 0x1f8a0, 0x1f8a2, 0x1f8a4, 0x1f8af, 0x1f8c0, 0x1f8c3, 0x1f8d0, 0x1f8d0, + 0x1f8e4, 0x1f8e4, 0x1f8e8, 0x1f8ec, 0x1f900, 0x1f903, 0x1f940, 0x1f940, + 0x1f942, 0x1f944, 0x1f94c, 0x1f94d, 0x1f94f, 0x1f951, 0x1f954, 0x1f954, + 0x1f957, 0x1f958, 0x1f95d, 0x1f95d, 0x1f962, 0x1f962, 0x1f964, 0x1f965, + 0x1f980, 0x1f986, 0x1f990, 0x1f99e, 0x1f9c0, 0x1f9c0, 0x1f9c5, 0x1f9cc, + 0x1f9e0, 0x1f9e2, 0x1f9f0, 0x1f9f0, 0x1fa00, 0x1fa01, /* GMU AO */ - 0x9300, 0x9316, 0x9400, 0x9400, + 0x23b00, 0x23b16, 0x23c00, 0x23c00, }; static const u32 a6xx_gmu_gpucc_registers[] = { /* GPU CC */ - 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 
0x9c07, 0x9c0b, - 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40, - 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002, - 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402, - 0xb800, 0xb802, + 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440b, + 0x24415, 0x2441c, 0x2441e, 0x2442d, 0x2443c, 0x2443d, 0x2443f, 0x24440, + 0x24442, 0x24449, 0x24458, 0x2445a, 0x24540, 0x2455e, 0x24800, 0x24802, + 0x24c00, 0x24c02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25c00, 0x25c02, + 0x26000, 0x26002, /* GPU CC ACD */ - 0xbc00, 0xbc16, 0xbc20, 0xbc27, + 0x26400, 0x26416, 0x26420, 0x26427, }; static const u32 a621_gmu_gpucc_registers[] = { /* GPU CC */ - 0x9800, 0x980e, 0x9c00, 0x9c0e, 0xb000, 0xb004, 0xb400, 0xb404, - 0xb800, 0xb804, 0xbc00, 0xbc05, 0xbc14, 0xbc1d, 0xbc2a, 0xbc30, - 0xbc32, 0xbc32, 0xbc41, 0xbc55, 0xbc66, 0xbc68, 0xbc78, 0xbc7a, - 0xbc89, 0xbc8a, 0xbc9c, 0xbc9e, 0xbca0, 0xbca3, 0xbcb3, 0xbcb5, - 0xbcc5, 0xbcc7, 0xbcd6, 0xbcd8, 0xbce8, 0xbce9, 0xbcf9, 0xbcfc, - 0xbd0b, 0xbd0c, 0xbd1c, 0xbd1e, 0xbd40, 0xbd70, 0xbe00, 0xbe16, - 0xbe20, 0xbe2d, + 0x24000, 0x2400e, 0x24400, 0x2440e, 0x25800, 0x25804, 0x25c00, 0x25c04, + 0x26000, 0x26004, 0x26400, 0x26405, 0x26414, 0x2641d, 0x2642a, 0x26430, + 0x26432, 0x26432, 0x26441, 0x26455, 0x26466, 0x26468, 0x26478, 0x2647a, + 0x26489, 0x2648a, 0x2649c, 0x2649e, 0x264a0, 0x264a3, 0x264b3, 0x264b5, + 0x264c5, 0x264c7, 0x264d6, 0x264d8, 0x264e8, 0x264e9, 0x264f9, 0x264fc, + 0x2650b, 0x2650c, 0x2651c, 0x2651e, 0x26540, 0x26570, 0x26600, 0x26616, + 0x26620, 0x2662d, }; static const u32 a6xx_gmu_cx_rscc_registers[] = { @@ -575,7 +575,7 @@ struct gen7_sptp_cluster_registers { /* statetype: SP block state type for the cluster */ enum a7xx_statetype_id statetype; /* pipe_id: Pipe identifier */ - enum a7xx_pipe pipe_id; + enum adreno_pipe pipe_id; /* context_id: Context identifier */ int context_id; /* location_id: Location identifier */ @@ -801,10 +801,10 @@ static const char *a7xx_statetype_names[] = { }; static const char *a7xx_pipe_names[] = { - A7XX_NAME(A7XX_PIPE_NONE), - A7XX_NAME(A7XX_PIPE_BR), - A7XX_NAME(A7XX_PIPE_BV), - A7XX_NAME(A7XX_PIPE_LPAC), + A7XX_NAME(PIPE_NONE), + A7XX_NAME(PIPE_BR), + A7XX_NAME(PIPE_BV), + A7XX_NAME(PIPE_LPAC), }; static const char *a7xx_cluster_names[] = { diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index 550de6ad68ef..53cfdf4e6c34 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -23,6 +23,7 @@ static const char * const a6xx_hfi_msg_id[] = { HFI_MSG_ID(HFI_H2F_MSG_START), HFI_MSG_ID(HFI_H2F_FEATURE_CTRL), HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START), + HFI_MSG_ID(HFI_H2F_MSG_TABLE), HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE), HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER), }; @@ -105,10 +106,25 @@ static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seq { int ret; u32 val; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + + do { + /* Wait for a response */ + ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, + val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000); + + if (!ret) + break; - /* Wait for a response */ - ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, - val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000); + if (completion_done(&a6xx_gpu->base.fault_coredump_done)) + break; + + /* We may timeout because the GMU is temporarily wedged from + * pending faults from the GPU and we are taking a devcoredump. 
+ * Wait until the MMU is resumed and try again. + */ + wait_for_completion(&a6xx_gpu->base.fault_coredump_done); + } while (true); if (ret) { DRM_DEV_ERROR(gmu->dev, @@ -255,11 +271,63 @@ static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu) NULL, 0); } +static int a8xx_hfi_send_perf_table(struct a6xx_gmu *gmu) +{ + unsigned int num_gx_votes = 3, num_cx_votes = 2; + struct a6xx_hfi_table_entry *entry; + struct a6xx_hfi_table *tbl; + int ret, i; + u32 size; + + size = sizeof(*tbl) + (2 * sizeof(tbl->entry[0])) + + (gmu->nr_gpu_freqs * num_gx_votes * sizeof(gmu->gx_arc_votes[0])) + + (gmu->nr_gmu_freqs * num_cx_votes * sizeof(gmu->cx_arc_votes[0])); + tbl = kzalloc(size, GFP_KERNEL); + tbl->type = HFI_TABLE_GPU_PERF; + + /* First fill GX votes */ + entry = &tbl->entry[0]; + entry->count = gmu->nr_gpu_freqs; + entry->stride = num_gx_votes; + + for (i = 0; i < gmu->nr_gpu_freqs; i++) { + unsigned int base = i * entry->stride; + + entry->data[base+0] = gmu->gx_arc_votes[i]; + entry->data[base+1] = gmu->dep_arc_votes[i]; + entry->data[base+2] = gmu->gpu_freqs[i] / 1000; + } + + /* Then fill CX votes */ + entry = (struct a6xx_hfi_table_entry *) + &tbl->entry[0].data[gmu->nr_gpu_freqs * num_gx_votes]; + + entry->count = gmu->nr_gmu_freqs; + entry->stride = num_cx_votes; + + for (i = 0; i < gmu->nr_gmu_freqs; i++) { + unsigned int base = i * entry->stride; + + entry->data[base] = gmu->cx_arc_votes[i]; + entry->data[base+1] = gmu->gmu_freqs[i] / 1000; + } + + ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TABLE, tbl, size, NULL, 0); + + kfree(tbl); + return ret; +} + static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct a6xx_hfi_msg_perf_table msg = { 0 }; int i; + if (adreno_is_a8xx(adreno_gpu)) + return a8xx_hfi_send_perf_table(gmu); + msg.num_gpu_levels = gmu->nr_gpu_freqs; msg.num_gmu_levels = gmu->nr_gmu_freqs; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h index 653ef720e2da..6f9f74a0bc85 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h @@ -185,6 +185,23 @@ struct a6xx_hfi_msg_core_fw_start { u32 handle; }; +#define HFI_H2F_MSG_TABLE 15 + +struct a6xx_hfi_table_entry { + u32 count; + u32 stride; + u32 data[]; +}; + +struct a6xx_hfi_table { + u32 header; + u32 version; + u32 type; +#define HFI_TABLE_BW_VOTE 0 +#define HFI_TABLE_GPU_PERF 1 + struct a6xx_hfi_table_entry entry[]; +}; + #define HFI_H2F_MSG_GX_BW_PERF_VOTE 30 struct a6xx_hfi_gx_bw_perf_vote_cmd { diff --git a/drivers/gpu/drm/msm/adreno/a8xx_gpu.c b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c new file mode 100644 index 000000000000..30de078e9dfd --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ + + +#include "msm_gem.h" +#include "msm_mmu.h" +#include "msm_gpu_trace.h" +#include "a6xx_gpu.h" +#include "a6xx_gmu.xml.h" + +#include <linux/bitfield.h> +#include <linux/devfreq.h> +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/pm_domain.h> +#include <linux/soc/qcom/llcc-qcom.h> + +#define GPU_PAS_ID 13 + +static void a8xx_aperture_slice_set(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + u32 val; + + val = A8XX_CP_APERTURE_CNTL_HOST_PIPEID(pipe) | A8XX_CP_APERTURE_CNTL_HOST_SLICEID(slice); + + if (a6xx_gpu->cached_aperture == val) + return; + + gpu_write(gpu, REG_A8XX_CP_APERTURE_CNTL_HOST, val); + + a6xx_gpu->cached_aperture = val; +} + +static void a8xx_aperture_acquire(struct msm_gpu *gpu, enum adreno_pipe pipe, unsigned long *flags) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + spin_lock_irqsave(&a6xx_gpu->aperture_lock, *flags); + + a8xx_aperture_slice_set(gpu, pipe, 0); +} + +static void a8xx_aperture_release(struct msm_gpu *gpu, unsigned long flags) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + spin_unlock_irqrestore(&a6xx_gpu->aperture_lock, flags); +} + +static void a8xx_aperture_clear(struct msm_gpu *gpu) +{ + unsigned long flags; + + a8xx_aperture_acquire(gpu, PIPE_NONE, &flags); + a8xx_aperture_release(gpu, flags); +} + +static void a8xx_write_pipe(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 offset, u32 data) +{ + unsigned long flags; + + a8xx_aperture_acquire(gpu, pipe, &flags); + gpu_write(gpu, offset, data); + a8xx_aperture_release(gpu, flags); +} + +static u32 a8xx_read_pipe_slice(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice, u32 offset) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&a6xx_gpu->aperture_lock, flags); + a8xx_aperture_slice_set(gpu, pipe, slice); + val = gpu_read(gpu, offset); + spin_unlock_irqrestore(&a6xx_gpu->aperture_lock, flags); + + return val; +} + +void a8xx_gpu_get_slice_info(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + const struct a6xx_info *info = adreno_gpu->info->a6xx; + u32 slice_mask; + + if (adreno_gpu->info->family < ADRENO_8XX_GEN1) + return; + + if (a6xx_gpu->slice_mask) + return; + + slice_mask = GENMASK(info->max_slices - 1, 0); + + /* GEN1 doesn't support partial slice configurations */ + if (adreno_gpu->info->family == ADRENO_8XX_GEN1) { + a6xx_gpu->slice_mask = slice_mask; + return; + } + + slice_mask &= a6xx_llc_read(a6xx_gpu, + REG_A8XX_CX_MISC_SLICE_ENABLE_FINAL); + + a6xx_gpu->slice_mask = slice_mask; + + /* Chip ID depends on the number of slices available. 
So update it */ + adreno_gpu->chip_id |= FIELD_PREP(GENMASK(7, 4), hweight32(slice_mask)); +} + +static u32 a8xx_get_first_slice(struct a6xx_gpu *a6xx_gpu) +{ + return ffs(a6xx_gpu->slice_mask) - 1; +} + +static inline bool _a8xx_check_idle(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + /* Check that the GMU is idle */ + if (!a6xx_gmu_isidle(&a6xx_gpu->gmu)) + return false; + + /* Check that the CX master is idle */ + if (gpu_read(gpu, REG_A8XX_RBBM_STATUS) & + ~A8XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER) + return false; + + return !(gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS) & + A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT); +} + +static bool a8xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + /* wait for CP to drain ringbuffer: */ + if (!adreno_idle(gpu, ring)) + return false; + + if (spin_until(_a8xx_check_idle(gpu))) { + DRM_ERROR( + "%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", + gpu->name, __builtin_return_address(0), + gpu_read(gpu, REG_A8XX_RBBM_STATUS), + gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS), + gpu_read(gpu, REG_A6XX_CP_RB_RPTR), + gpu_read(gpu, REG_A6XX_CP_RB_WPTR)); + return false; + } + + return true; +} + +void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + uint32_t wptr; + unsigned long flags; + + spin_lock_irqsave(&ring->preempt_lock, flags); + + /* Copy the shadow to the actual register */ + ring->cur = ring->next; + + /* Make sure to wrap wptr if we need to */ + wptr = get_wptr(ring); + + /* Update HW if this is the current ring and we are not in preempt*/ + if (!a6xx_in_preempt(a6xx_gpu)) { + if (a6xx_gpu->cur_ring == ring) + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + else + ring->restore_wptr = true; + } else { + ring->restore_wptr = true; + } + + spin_unlock_irqrestore(&ring->preempt_lock, flags); +} + +static void a8xx_set_hwcg(struct msm_gpu *gpu, bool state) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + u32 val; + + if (adreno_is_x285(adreno_gpu) && state) + gpu_write(gpu, REG_A8XX_RBBM_CGC_0_PC, 0x00000702); + + gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, + state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0); + gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, + state ? 0x110111 : 0); + gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, + state ? 0x55555 : 0); + + gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 1); + gpu_write(gpu, REG_A8XX_RBBM_CGC_GLOBAL_LOAD_CMD, !!state); + + if (state) { + gpu_write(gpu, REG_A8XX_RBBM_CGC_P2S_TRIG_CMD, 1); + + if (gpu_poll_timeout(gpu, REG_A8XX_RBBM_CGC_P2S_STATUS, val, + val & A8XX_RBBM_CGC_P2S_STATUS_TXDONE, 1, 10)) { + dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n"); + return; + } + + gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 0); + } else { + /* + * GMU enables clk gating in GBIF during boot up. 
So, + * override that here when hwcg feature is disabled + */ + gpu_rmw(gpu, REG_A8XX_GBIF_CX_CONFIG, BIT(0), 0); + } +} + +static void a8xx_set_cp_protect(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + const struct adreno_protect *protect = adreno_gpu->info->a6xx->protect; + u32 cntl, final_cfg; + unsigned int i; + + cntl = A8XX_CP_PROTECT_CNTL_PIPE_ACCESS_PROT_EN | + A8XX_CP_PROTECT_CNTL_PIPE_ACCESS_FAULT_ON_VIOL_EN | + A8XX_CP_PROTECT_CNTL_PIPE_LAST_SPAN_INF_RANGE | + A8XX_CP_PROTECT_CNTL_PIPE_HALT_SQE_RANGE__MASK; + /* + * Enable access protection to privileged registers, fault on an access + * protect violation and select the last span to protect from the start + * address all the way to the end of the register address space + */ + a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_CP_PROTECT_CNTL_PIPE, cntl); + a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_CP_PROTECT_CNTL_PIPE, cntl); + + a8xx_aperture_clear(gpu); + + for (i = 0; i < protect->count; i++) { + /* Intentionally skip writing to some registers */ + if (protect->regs[i]) { + gpu_write(gpu, REG_A8XX_CP_PROTECT_GLOBAL(i), protect->regs[i]); + final_cfg = protect->regs[i]; + } + } + + /* + * Last span feature is only supported on PIPE specific register. + * So update those here + */ + a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_CP_PROTECT_PIPE(protect->count_max), final_cfg); + a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_CP_PROTECT_PIPE(protect->count_max), final_cfg); + + a8xx_aperture_clear(gpu); +} + +static void a8xx_set_ubwc_config(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config; + u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2); + u32 level3_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL3); + bool rgba8888_lossless = false, fp16compoptdis = false; + bool yuvnotcomptofc = false, min_acc_len_64b = false; + bool rgb565_predicator = false, amsbc = false; + bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg); + u32 ubwc_version = cfg->ubwc_enc_version; + u32 hbb, hbb_hi, hbb_lo, mode = 1; + u8 uavflagprd_inv = 2; + + switch (ubwc_version) { + case UBWC_5_0: + amsbc = true; + rgb565_predicator = true; + mode = 4; + break; + case UBWC_4_0: + amsbc = true; + rgb565_predicator = true; + fp16compoptdis = true; + rgba8888_lossless = true; + mode = 2; + break; + case UBWC_3_0: + amsbc = true; + mode = 1; + break; + default: + dev_err(&gpu->pdev->dev, "Unknown UBWC version: 0x%x\n", ubwc_version); + break; + } + + /* + * We subtract 13 from the highest bank bit (13 is the minimum value + * allowed by hw) and write the lowest two bits of the remaining value + * as hbb_lo and the one above it as hbb_hi to the hardware. 
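+ * (For example, a highest_bank_bit of 15 gives hbb = 2, i.e. hbb_hi = 0 and hbb_lo = 2.)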
+ */ + WARN_ON(cfg->highest_bank_bit < 13); + hbb = cfg->highest_bank_bit - 13; + hbb_hi = hbb >> 2; + hbb_lo = hbb & 3; + a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5); + a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5); + + a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CCU_NC_MODE_CNTL, + yuvnotcomptofc << 6 | + hbb_hi << 3 | + hbb_lo << 1); + + a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CMP_NC_MODE_CNTL, + mode << 15 | + yuvnotcomptofc << 6 | + rgba8888_lossless << 4 | + fp16compoptdis << 3 | + rgb565_predicator << 2 | + amsbc << 1 | + min_acc_len_64b); + + a8xx_aperture_clear(gpu); + + gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, + level3_swizzling_dis << 13 | + level2_swizzling_dis << 12 | + hbb_hi << 10 | + uavflagprd_inv << 4 | + min_acc_len_64b << 3 | + hbb_lo << 1 | ubwc_mode); + + gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, + level3_swizzling_dis << 7 | + level2_swizzling_dis << 6 | + hbb_hi << 4 | + min_acc_len_64b << 3 | + hbb_lo << 1 | ubwc_mode); +} + +static void a8xx_nonctxt_config(struct msm_gpu *gpu, u32 *gmem_protect) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + const struct a6xx_info *info = adreno_gpu->info->a6xx; + const struct adreno_reglist_pipe *regs = info->nonctxt_reglist; + unsigned int pipe_id, i; + unsigned long flags; + + for (pipe_id = PIPE_NONE; pipe_id <= PIPE_DDE_BV; pipe_id++) { + /* We don't have support for LPAC yet */ + if (pipe_id == PIPE_LPAC) + continue; + + a8xx_aperture_acquire(gpu, pipe_id, &flags); + + for (i = 0; regs[i].offset; i++) { + if (!(BIT(pipe_id) & regs[i].pipe)) + continue; + + if (regs[i].offset == REG_A8XX_RB_GC_GMEM_PROTECT) + *gmem_protect = regs[i].value; + + gpu_write(gpu, regs[i].offset, regs[i].value); + } + + a8xx_aperture_release(gpu, flags); + } + + a8xx_aperture_clear(gpu); +} + +static int a8xx_cp_init(struct msm_gpu *gpu) +{ + struct msm_ringbuffer *ring = gpu->rb[0]; + u32 mask; + + /* Disable concurrent binning before sending CP init */ + OUT_PKT7(ring, CP_THREAD_CONTROL, 1); + OUT_RING(ring, BIT(27)); + + OUT_PKT7(ring, CP_ME_INIT, 4); + + /* Use multiple HW contexts */ + mask = BIT(0); + + /* Enable error detection */ + mask |= BIT(1); + + /* Set default reset state */ + mask |= BIT(3); + + /* Disable save/restore of performance counters across preemption */ + mask |= BIT(6); + + OUT_RING(ring, mask); + + /* Enable multiple hardware contexts */ + OUT_RING(ring, 0x00000003); + + /* Enable error detection */ + OUT_RING(ring, 0x20000000); + + /* Operation mode mask */ + OUT_RING(ring, 0x00000002); + + a6xx_flush(gpu, ring); + return a8xx_idle(gpu, ring) ? 
0 : -EINVAL; +} + +#define A8XX_INT_MASK \ + (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \ + A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \ + A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \ + A6XX_RBBM_INT_0_MASK_CP_SW | \ + A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \ + A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \ + A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \ + A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \ + A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \ + A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \ + A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ + A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \ + A6XX_RBBM_INT_0_MASK_TSBWRITEERROR | \ + A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION) + +#define A8XX_APRIV_MASK \ + (A8XX_CP_APRIV_CNTL_PIPE_ICACHE | \ + A8XX_CP_APRIV_CNTL_PIPE_RBFETCH | \ + A8XX_CP_APRIV_CNTL_PIPE_RBPRIVLEVEL | \ + A8XX_CP_APRIV_CNTL_PIPE_RBRPWB) + +#define A8XX_BR_APRIV_MASK \ + (A8XX_APRIV_MASK | \ + A8XX_CP_APRIV_CNTL_PIPE_CDREAD | \ + A8XX_CP_APRIV_CNTL_PIPE_CDWRITE) + +#define A8XX_CP_GLOBAL_INT_MASK \ + (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBR | \ + A8XX_CP_GLOBAL_INT_MASK_HWFAULTBV | \ + A8XX_CP_GLOBAL_INT_MASK_HWFAULTLPAC | \ + A8XX_CP_GLOBAL_INT_MASK_HWFAULTAQE0 | \ + A8XX_CP_GLOBAL_INT_MASK_HWFAULTAQE1 | \ + A8XX_CP_GLOBAL_INT_MASK_HWFAULTDDEBR | \ + A8XX_CP_GLOBAL_INT_MASK_HWFAULTDDEBV | \ + A8XX_CP_GLOBAL_INT_MASK_SWFAULTBR | \ + A8XX_CP_GLOBAL_INT_MASK_SWFAULTBV | \ + A8XX_CP_GLOBAL_INT_MASK_SWFAULTLPAC | \ + A8XX_CP_GLOBAL_INT_MASK_SWFAULTAQE0 | \ + A8XX_CP_GLOBAL_INT_MASK_SWFAULTAQE1 | \ + A8XX_CP_GLOBAL_INT_MASK_SWFAULTDDEBR | \ + A8XX_CP_GLOBAL_INT_MASK_SWFAULTDDEBV) + +#define A8XX_CP_INTERRUPT_STATUS_MASK_PIPE \ + (A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFRBWRAP | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB1WRAP | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB2WRAP | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB3WRAP | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFSDSWRAP | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFMRBWRAP | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFVSDWRAP | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_OPCODEERROR | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VSDPARITYERROR | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_REGISTERPROTECTIONERROR | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_ILLEGALINSTRUCTION | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_SMMUFAULT | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPCLIENT| \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPTYPE | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPREAD | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESP | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_RTWROVF | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTWROVF | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTREFCNTOVF | \ + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTCLRRESMISS) + +#define A8XX_CP_HW_FAULT_STATUS_MASK_PIPE \ + (A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFRBFAULT | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB1FAULT | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB2FAULT | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB3FAULT | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFSDSFAULT | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFMRBFAULT | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFVSDFAULT | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_SQEREADBURSTOVF | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_EVENTENGINEOVF | \ + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_UCODEERROR) + +static int hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + unsigned int pipe_id, i; + u32 gmem_protect = 0; + u64 gmem_range_min; + int ret; + + ret = 
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + if (ret) + return ret; + + /* Clear the cached value to force aperture configuration next time */ + a6xx_gpu->cached_aperture = UINT_MAX; + a8xx_aperture_clear(gpu); + + /* Clear GBIF halt in case GX domain was not collapsed */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); + gpu_read(gpu, REG_A6XX_GBIF_HALT); + + gpu_write(gpu, REG_A8XX_RBBM_GBIF_HALT, 0); + gpu_read(gpu, REG_A8XX_RBBM_GBIF_HALT); + + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0); + + /* + * Disable the trusted memory range - we don't actually support secure + * memory rendering at this point in time and we don't want to block off + * part of the virtual memory space. + */ + gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000); + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); + + /* Make all blocks contribute to the GPU BUSY perf counter */ + gpu_write(gpu, REG_A8XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); + + /* Setup GMEM Range in UCHE */ + gmem_range_min = SZ_64M; + /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ + gpu_write64(gpu, REG_A8XX_UCHE_CCHE_GC_GMEM_RANGE_MIN, gmem_range_min); + gpu_write64(gpu, REG_A8XX_SP_HLSQ_GC_GMEM_RANGE_MIN, gmem_range_min); + + /* Setup UCHE Trap region */ + gpu_write64(gpu, REG_A8XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base); + gpu_write64(gpu, REG_A8XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base); + gpu_write64(gpu, REG_A8XX_UCHE_CCHE_TRAP_BASE, adreno_gpu->uche_trap_base); + gpu_write64(gpu, REG_A8XX_UCHE_CCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base); + + /* Turn on performance counters */ + gpu_write(gpu, REG_A8XX_RBBM_PERFCTR_CNTL, 0x1); + gpu_write(gpu, REG_A8XX_RBBM_SLICE_PERFCTR_CNTL, 0x1); + + /* Turn on the IFPC counter (countable 4 on XOCLK1) */ + gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_1, + FIELD_PREP(GENMASK(7, 0), 0x4)); + + /* Select CP0 to always count cycles */ + gpu_write(gpu, REG_A8XX_CP_PERFCTR_CP_SEL(0), 1); + + a8xx_set_ubwc_config(gpu); + + /* Set weights for bicubic filtering */ + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), 0); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(1), 0x3fe05ff4); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(2), 0x3fa0ebee); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(3), 0x3f5193ed); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(4), 0x3f0243f0); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(5), 0x00000000); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(6), 0x3fd093e8); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(7), 0x3f4133dc); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(8), 0x3ea1dfdb); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(9), 0x3e0283e0); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(10), 0x0000ac2b); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(11), 0x0000f01d); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(12), 0x00114412); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(13), 0x0021980a); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(14), 0x0051ec05); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(15), 0x0000380e); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(16), 0x3ff09001); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(17), 0x3fc10bfa); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(18), 0x3f9193f7); + gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(19), 0x3f7227f7); + + gpu_write(gpu, REG_A8XX_UCHE_CLIENT_PF, BIT(7) | 0x1); + +
a8xx_nonctxt_config(gpu, &gmem_protect); + + /* Enable fault detection */ + gpu_write(gpu, REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, BIT(30) | 0xcfffff); + gpu_write(gpu, REG_A8XX_RBBM_SLICE_INTERFACE_HANG_INT_CNTL, BIT(30)); + + /* Set up the CX GMU counter 0 to count busy ticks */ + gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); + + /* Enable the power counter */ + gmu_rmw(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_0, 0xff, BIT(5)); + gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); + + /* Protect registers from the CP */ + a8xx_set_cp_protect(gpu); + + /* Enable the GMEM save/restore feature for preemption */ + a8xx_write_pipe(gpu, PIPE_BR, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 1); + + for (pipe_id = PIPE_BR; pipe_id <= PIPE_DDE_BV; pipe_id++) { + u32 apriv_mask = A8XX_APRIV_MASK; + unsigned long flags; + + if (pipe_id == PIPE_LPAC) + continue; + + if (pipe_id == PIPE_BR) + apriv_mask = A8XX_BR_APRIV_MASK; + + a8xx_aperture_acquire(gpu, pipe_id, &flags); + gpu_write(gpu, REG_A8XX_CP_APRIV_CNTL_PIPE, apriv_mask); + gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_PIPE, + A8XX_CP_INTERRUPT_STATUS_MASK_PIPE); + gpu_write(gpu, REG_A8XX_CP_HW_FAULT_STATUS_MASK_PIPE, + A8XX_CP_HW_FAULT_STATUS_MASK_PIPE); + a8xx_aperture_release(gpu, flags); + } + + a8xx_aperture_clear(gpu); + + /* Enable interrupts */ + gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_GLOBAL, A8XX_CP_GLOBAL_INT_MASK); + gpu_write(gpu, REG_A8XX_RBBM_INT_0_MASK, A8XX_INT_MASK); + + ret = adreno_hw_init(gpu); + if (ret) + goto out; + + gpu_write64(gpu, REG_A8XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova); + if (a6xx_gpu->aqe_iova) + gpu_write64(gpu, REG_A8XX_CP_AQE_INSTR_BASE_0, a6xx_gpu->aqe_iova); + + /* Set the ringbuffer address */ + gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova); + gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT); + + /* Configure the RPTR shadow if needed: */ + gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0])); + gpu_write64(gpu, REG_A8XX_CP_RB_RPTR_ADDR_BV, rbmemptr(gpu->rb[0], bv_rptr)); + + for (i = 0; i < gpu->nr_rings; i++) + a6xx_gpu->shadow[i] = 0; + + /* Always come up on rb 0 */ + a6xx_gpu->cur_ring = gpu->rb[0]; + + for (i = 0; i < gpu->nr_rings; i++) + gpu->rb[i]->cur_ctx_seqno = 0; + + /* Enable the SQE to start the CP engine */ + gpu_write(gpu, REG_A8XX_CP_SQE_CNTL, 1); + + ret = a8xx_cp_init(gpu); + if (ret) + goto out; + + /* + * Try to load a zap shader into the secure world. If successful + * we can use the CP to switch out of secure mode. If not then we + * have no recourse but to try to switch ourselves out manually. If we + * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will + * be blocked and a permissions violation will soon follow. + */ + ret = a6xx_zap_shader_init(gpu); + if (!ret) { + OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); + OUT_RING(gpu->rb[0], 0x00000000); + + a6xx_flush(gpu, gpu->rb[0]); + if (!a8xx_idle(gpu, gpu->rb[0])) + return -EINVAL; + } else if (ret == -ENODEV) { + /* + * This device does not use zap shader (but print a warning + * just in case someone got their dt wrong.. hopefully they + * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly) + */ + dev_warn_once(gpu->dev->dev, + "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); + ret = 0; + } else { + return ret; + } + + /* + * GMEM_PROTECT register should be programmed after GPU is transitioned to + * non-secure mode + */ + a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_GC_GMEM_PROTECT, gmem_protect); + WARN_ON(!gmem_protect); + a8xx_aperture_clear(gpu); + + /* Enable hardware clockgating */ + a8xx_set_hwcg(gpu, true); +out: + /* + * Tell the GMU that we are done touching the GPU and it can start power + * management + */ + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + + return ret; +} + +int a8xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int ret; + + mutex_lock(&a6xx_gpu->gmu.lock); + ret = hw_init(gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); + + return ret; +} + +static void a8xx_dump(struct msm_gpu *gpu) +{ + DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", gpu_read(gpu, REG_A8XX_RBBM_STATUS)); + adreno_dump(gpu); +} + +void a8xx_recover(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + int active_submits; + + adreno_dump_info(gpu); + + if (hang_debug) + a8xx_dump(gpu); + + /* + * To handle recovery specific sequences during the rpm suspend we are + * about to trigger + */ + a6xx_gpu->hung = true; + + /* Halt SQE first */ + gpu_write(gpu, REG_A8XX_CP_SQE_CNTL, 3); + + pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); + + /* active_submit won't change until we make a submission */ + mutex_lock(&gpu->active_lock); + active_submits = gpu->active_submits; + + /* + * Temporarily clear active_submits count to silence a WARN() in the + * runtime suspend cb + */ + gpu->active_submits = 0; + + reinit_completion(&gmu->pd_gate); + dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb); + dev_pm_genpd_synced_poweroff(gmu->cxpd); + + /* Drop the rpm refcount from active submits */ + if (active_submits) + pm_runtime_put(&gpu->pdev->dev); + + /* And the final one from recover worker */ + pm_runtime_put_sync(&gpu->pdev->dev); + + if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000))) + DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n"); + + dev_pm_genpd_remove_notifier(gmu->cxpd); + + pm_runtime_use_autosuspend(&gpu->pdev->dev); + + if (active_submits) + pm_runtime_get(&gpu->pdev->dev); + + pm_runtime_get_sync(&gpu->pdev->dev); + + gpu->active_submits = active_submits; + mutex_unlock(&gpu->active_lock); + + msm_gpu_hw_init(gpu); + a6xx_gpu->hung = false; +} + +static const char *a8xx_uche_fault_block(struct msm_gpu *gpu, u32 mid) +{ + static const char * const uche_clients[] = { + "BR_VFD", "BR_SP", "BR_VSC", "BR_VPC", "BR_HLSQ", "BR_PC", "BR_LRZ", "BR_TP", + "BV_VFD", "BV_SP", "BV_VSC", "BV_VPC", "BV_HLSQ", "BV_PC", "BV_LRZ", "BV_TP", + "STCHE", + }; + static const char * const uche_clients_lpac[] = { + "-", "SP_LPAC", "-", "-", "HLSQ_LPAC", "-", "-", "TP_LPAC", + }; + u32 val; + + /* + * The source of the data depends on the mid ID read from FSYNR1. 
+ * and the client ID read from the UCHE block + */ + val = gpu_read(gpu, REG_A8XX_UCHE_CLIENT_PF); + + val &= GENMASK(6, 0); + + /* mid=3 refers to BR or BV */ + if (mid == 3) { + if (val < ARRAY_SIZE(uche_clients)) + return uche_clients[val]; + else + return "UCHE"; + } + + /* mid=8 refers to LPAC */ + if (mid == 8) { + if (val < ARRAY_SIZE(uche_clients_lpac)) + return uche_clients_lpac[val]; + else + return "UCHE_LPAC"; + } + + return "Unknown"; +} + +static const char *a8xx_fault_block(struct msm_gpu *gpu, u32 id) +{ + switch (id) { + case 0x0: + return "CP"; + case 0x1: + return "UCHE: Unknown"; + case 0x2: + return "UCHE_LPAC: Unknown"; + case 0x3: + case 0x8: + return a8xx_uche_fault_block(gpu, id); + case 0x4: + return "CCU"; + case 0x5: + return "Flag cache"; + case 0x6: + return "PREFETCH"; + case 0x7: + return "GMU"; + case 0x9: + return "UCHE_HPAC"; + } + + return "Unknown"; +} + +int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data) +{ + struct msm_gpu *gpu = arg; + struct adreno_smmu_fault_info *info = data; + const char *block = "unknown"; + + u32 scratch[] = { + gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(0)), + gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(1)), + gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(2)), + gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(3)), + }; + + if (info) + block = a8xx_fault_block(gpu, info->fsynr1 & 0xff); + + return adreno_fault_handler(gpu, iova, flags, info, block, scratch); +} + +static void a8xx_cp_hw_err_irq(struct msm_gpu *gpu) +{ + u32 status = gpu_read(gpu, REG_A8XX_CP_INTERRUPT_STATUS_GLOBAL); + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + u32 slice = a8xx_get_first_slice(a6xx_gpu); + u32 hw_fault_mask = GENMASK(6, 0); + u32 sw_fault_mask = GENMASK(22, 16); + u32 pipe = 0; + + dev_err_ratelimited(&gpu->pdev->dev, "CP Fault Global INT status: 0x%x\n", status); + + if (status & (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBR | + A8XX_CP_GLOBAL_INT_MASK_SWFAULTBR)) + pipe |= BIT(PIPE_BR); + + if (status & (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBV | + A8XX_CP_GLOBAL_INT_MASK_SWFAULTBV)) + pipe |= BIT(PIPE_BV); + + if (!pipe) { + dev_err_ratelimited(&gpu->pdev->dev, "CP Fault Unknown pipe\n"); + goto out; + } + + for (unsigned int pipe_id = PIPE_NONE; pipe_id <= PIPE_DDE_BV; pipe_id++) { + if (!(BIT(pipe_id) & pipe)) + continue; + + if (hw_fault_mask & status) { + status = a8xx_read_pipe_slice(gpu, pipe_id, slice, + REG_A8XX_CP_HW_FAULT_STATUS_PIPE); + dev_err_ratelimited(&gpu->pdev->dev, + "CP HW FAULT pipe: %u status: 0x%x\n", pipe_id, status); + } + + if (sw_fault_mask & status) { + status = a8xx_read_pipe_slice(gpu, pipe_id, slice, + REG_A8XX_CP_INTERRUPT_STATUS_PIPE); + dev_err_ratelimited(&gpu->pdev->dev, + "CP SW FAULT pipe: %u status: 0x%x\n", pipe_id, status); + + if (status & BIT(8)) { + a8xx_write_pipe(gpu, pipe_id, REG_A8XX_CP_SQE_STAT_ADDR_PIPE, 1); + status = a8xx_read_pipe_slice(gpu, pipe_id, slice, + REG_A8XX_CP_SQE_STAT_DATA_PIPE); + dev_err_ratelimited(&gpu->pdev->dev, + "CP Opcode error, opcode=0x%x\n", status); + } + + if (status & BIT(10)) { + status = a8xx_read_pipe_slice(gpu, pipe_id, slice, + REG_A8XX_CP_PROTECT_STATUS_PIPE); + dev_err_ratelimited(&gpu->pdev->dev, + "CP REG PROTECT error, status=0x%x\n", status); + } + } + } + +out: + /* Turn off interrupts to avoid triggering recovery again */ + a8xx_aperture_clear(gpu); + gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_GLOBAL, 0); + gpu_write(gpu, REG_A8XX_RBBM_INT_0_MASK, 0); + + kthread_queue_work(gpu->worker, 
&gpu->recover_work); +} + +static u32 gpu_periph_read(struct msm_gpu *gpu, u32 dbg_offset) +{ + gpu_write(gpu, REG_A8XX_CP_SQE_UCODE_DBG_ADDR_PIPE, dbg_offset); + + return gpu_read(gpu, REG_A8XX_CP_SQE_UCODE_DBG_DATA_PIPE); +} + +static u64 gpu_periph_read64(struct msm_gpu *gpu, u32 dbg_offset) +{ + u64 lo, hi; + + lo = gpu_periph_read(gpu, dbg_offset); + hi = gpu_periph_read(gpu, dbg_offset + 1); + + return (hi << 32) | lo; +} + +#define CP_PERIPH_IB1_BASE_LO 0x7005 +#define CP_PERIPH_IB1_BASE_HI 0x7006 +#define CP_PERIPH_IB1_SIZE 0x7007 +#define CP_PERIPH_IB1_OFFSET 0x7008 +#define CP_PERIPH_IB2_BASE_LO 0x7009 +#define CP_PERIPH_IB2_BASE_HI 0x700a +#define CP_PERIPH_IB2_SIZE 0x700b +#define CP_PERIPH_IB2_OFFSET 0x700c +#define CP_PERIPH_IB3_BASE_LO 0x700d +#define CP_PERIPH_IB3_BASE_HI 0x700e +#define CP_PERIPH_IB3_SIZE 0x700f +#define CP_PERIPH_IB3_OFFSET 0x7010 + +static void a8xx_fault_detect_irq(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); + unsigned long flags; + + /* + * If stalled on SMMU fault, we could trip the GPU's hang detection, + * but the fault handler will trigger the devcore dump, and we want + * to otherwise resume normally rather than killing the submit, so + * just bail. + */ + if (gpu_read(gpu, REG_A8XX_RBBM_MISC_STATUS) & A8XX_RBBM_MISC_STATUS_SMMU_STALLED_ON_FAULT) + return; + + /* + * Force the GPU to stay on until after we finish + * collecting information + */ + if (!adreno_has_gmu_wrapper(adreno_gpu)) + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1); + + DRM_DEV_ERROR(&gpu->pdev->dev, + "gpu fault ring %d fence %x status %8.8X gfx_status %8.8X\n", + ring ? ring->id : -1, ring ? 
ring->fctx->last_fence : 0, + gpu_read(gpu, REG_A8XX_RBBM_STATUS), gpu_read(gpu, REG_A8XX_RBBM_GFX_STATUS)); + + a8xx_aperture_acquire(gpu, PIPE_BR, &flags); + + DRM_DEV_ERROR(&gpu->pdev->dev, + "BR: status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x ib3 %16.16llX/%4.4x\n", + gpu_read(gpu, REG_A8XX_RBBM_GFX_BR_STATUS), + gpu_read(gpu, REG_A6XX_CP_RB_RPTR), + gpu_read(gpu, REG_A6XX_CP_RB_WPTR), + gpu_periph_read64(gpu, CP_PERIPH_IB1_BASE_LO), + gpu_periph_read(gpu, CP_PERIPH_IB1_OFFSET), + gpu_periph_read64(gpu, CP_PERIPH_IB2_BASE_LO), + gpu_periph_read(gpu, CP_PERIPH_IB2_OFFSET), + gpu_periph_read64(gpu, CP_PERIPH_IB3_BASE_LO), + gpu_periph_read(gpu, CP_PERIPH_IB3_OFFSET)); + + a8xx_aperture_release(gpu, flags); + a8xx_aperture_acquire(gpu, PIPE_BV, &flags); + + DRM_DEV_ERROR(&gpu->pdev->dev, + "BV: status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x ib3 %16.16llX/%4.4x\n", + gpu_read(gpu, REG_A8XX_RBBM_GFX_BV_STATUS), + gpu_read(gpu, REG_A8XX_CP_RB_RPTR_BV), + gpu_read(gpu, REG_A6XX_CP_RB_WPTR), + gpu_periph_read64(gpu, CP_PERIPH_IB1_BASE_LO), + gpu_periph_read(gpu, CP_PERIPH_IB1_OFFSET), + gpu_periph_read64(gpu, CP_PERIPH_IB2_BASE_LO), + gpu_periph_read(gpu, CP_PERIPH_IB2_OFFSET), + gpu_periph_read64(gpu, CP_PERIPH_IB3_BASE_LO), + gpu_periph_read(gpu, CP_PERIPH_IB3_OFFSET)); + + a8xx_aperture_release(gpu, flags); + a8xx_aperture_clear(gpu); + + /* Turn off the hangcheck timer to keep it from bothering us */ + timer_delete(&gpu->hangcheck_timer); + + kthread_queue_work(gpu->worker, &gpu->recover_work); +} + +static void a8xx_sw_fuse_violation_irq(struct msm_gpu *gpu) +{ + u32 status; + + status = gpu_read(gpu, REG_A8XX_RBBM_SW_FUSE_INT_STATUS); + gpu_write(gpu, REG_A8XX_RBBM_SW_FUSE_INT_MASK, 0); + + dev_err_ratelimited(&gpu->pdev->dev, "SW fuse violation status=%8.8x\n", status); + + /* + * Ignore FASTBLEND violations, because the HW will silently fall back + * to legacy blending. 
+ */ + if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING | + A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) { + timer_delete(&gpu->hangcheck_timer); + + kthread_queue_work(gpu->worker, &gpu->recover_work); + } +} + +irqreturn_t a8xx_irq(struct msm_gpu *gpu) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + u32 status = gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS); + + gpu_write(gpu, REG_A8XX_RBBM_INT_CLEAR_CMD, status); + + if (priv->disable_err_irq) + status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS; + + if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT) + a8xx_fault_detect_irq(gpu); + + if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR) { + u32 rl0, rl1; + + rl0 = gpu_read(gpu, REG_A8XX_CP_RL_ERROR_DETAILS_0); + rl1 = gpu_read(gpu, REG_A8XX_CP_RL_ERROR_DETAILS_1); + dev_err_ratelimited(&gpu->pdev->dev, + "CP | AHB bus error RL_ERROR_0: %x, RL_ERROR_1: %x\n", rl0, rl1); + } + + if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR) + a8xx_cp_hw_err_irq(gpu); + + if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW) + dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); + + if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW) + dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); + + if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS) + dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); + + if (status & A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR) + dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Trap interrupt\n"); + + if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION) + a8xx_sw_fuse_violation_irq(gpu); + + if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) { + msm_gpu_retire(gpu); + a6xx_preempt_trigger(gpu); + } + + if (status & A6XX_RBBM_INT_0_MASK_CP_SW) + a6xx_preempt_irq(gpu); + + return IRQ_HANDLED; +} + +void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + + if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { + u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); + + gpu_scid &= GENMASK(5, 0); + + gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, + FIELD_PREP(GENMASK(29, 24), gpu_scid) | + FIELD_PREP(GENMASK(23, 18), gpu_scid) | + FIELD_PREP(GENMASK(17, 12), gpu_scid) | + FIELD_PREP(GENMASK(11, 6), gpu_scid) | + FIELD_PREP(GENMASK(5, 0), gpu_scid)); + + gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, + FIELD_PREP(GENMASK(27, 22), gpu_scid) | + FIELD_PREP(GENMASK(21, 16), gpu_scid) | + FIELD_PREP(GENMASK(15, 10), gpu_scid) | + BIT(8)); + } + + llcc_slice_activate(a6xx_gpu->htw_llc_slice); +} + +#define GBIF_CLIENT_HALT_MASK BIT(0) +#define GBIF_ARB_HALT_MASK BIT(1) +#define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0) +#define VBIF_RESET_ACK_MASK 0xF0 +#define GPR0_GBIF_HALT_REQUEST 0x1E0 + +void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + + if (gx_off) { + /* Halt the gx side of GBIF */ + gpu_write(gpu, REG_A8XX_RBBM_GBIF_HALT, 1); + spin_until(gpu_read(gpu, REG_A8XX_RBBM_GBIF_HALT_ACK) & 1); + } + + /* Halt new client requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); + + /* Halt all AXI requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); + + /* The GBIF halt needs to be explicitly cleared */ + gpu_write(gpu, 
REG_A6XX_GBIF_HALT, 0x0); +} + +int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + mutex_lock(&a6xx_gpu->gmu.lock); + + /* Force the GPU power on so we can read this register */ + a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); + + *value = gpu_read64(gpu, REG_A8XX_CP_ALWAYS_ON_COUNTER); + + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); + + mutex_unlock(&a6xx_gpu->gmu.lock); + + return 0; +} + +u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + u64 busy_cycles; + + /* 19.2MHz */ + *out_sample_rate = 19200000; + + busy_cycles = gmu_read64(&a6xx_gpu->gmu, + REG_A8XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L, + REG_A8XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H); + + return busy_cycles; +} + +bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + return true; +} diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 28f744f3caf7..554d746f115b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -34,6 +34,7 @@ extern const struct adreno_gpulist a4xx_gpulist; extern const struct adreno_gpulist a5xx_gpulist; extern const struct adreno_gpulist a6xx_gpulist; extern const struct adreno_gpulist a7xx_gpulist; +extern const struct adreno_gpulist a8xx_gpulist; static const struct adreno_gpulist *gpulists[] = { &a2xx_gpulist, @@ -42,6 +43,7 @@ static const struct adreno_gpulist *gpulists[] = { &a5xx_gpulist, &a6xx_gpulist, &a7xx_gpulist, + &a8xx_gpulist, }; static const struct adreno_info *adreno_info(uint32_t chip_id) @@ -235,7 +237,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) priv->has_cached_coherent = !!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT); - gpu = info->init(drm); + gpu = info->funcs->init(drm); if (IS_ERR(gpu)) { dev_warn(drm->dev, "failed to load adreno gpu\n"); return PTR_ERR(gpu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h index 04b49d385f9d..d513e03fef08 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h @@ -82,85 +82,85 @@ static const u32 gen7_0_0_debugbus_blocks[] = { }; static const struct gen7_shader_block gen7_0_0_shader_blocks[] = { - {A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_INST_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_INST_DATA_1, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_0_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_1_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_2_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_3_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_4_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_5_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_6_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_7_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_CB_RAM, 0x390, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_INST_TAG, 0x90, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_INST_DATA_2, 0x200, 4, 2, 
A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_TMO_TAG, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_SMO_TAG, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_STATE_DATA, 0x40, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_HWAVE_RAM, 0x100, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_L0_INST_BUF, 0x50, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_8_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_9_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_10_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_11_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_12_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM_1, 0x200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - 
{A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_TP0_TMO_DATA, 0x200, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_TP0_SMO_DATA, 0x80, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_INST_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_INST_DATA_1, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_0_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_1_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_2_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_3_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_4_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_5_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_6_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_7_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_CB_RAM, 0x390, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_INST_TAG, 0x90, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_INST_DATA_2, 0x200, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_TMO_TAG, 0x80, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_SMO_TAG, 0x80, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_STATE_DATA, 0x40, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_HWAVE_RAM, 0x100, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_L0_INST_BUF, 0x50, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_8_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_9_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_10_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_11_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_12_DATA, 0x200, 4, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + 
{A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM_1, 0x200, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, }; static const u32 gen7_0_0_pre_crashdumper_gpu_registers[] = { @@ -303,7 +303,7 @@ static const u32 gen7_0_0_noncontext_rb_rbp_pipe_br_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_noncontext_rb_rbp_pipe_br_registers), 8)); -/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: A7XX_PIPE_BR */ +/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: PIPE_BR */ static const u32 gen7_0_0_gras_cluster_gras_pipe_br_registers[] = { 0x08000, 0x08008, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d, 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa, @@ -313,7 +313,7 @@ static const u32 gen7_0_0_gras_cluster_gras_pipe_br_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_gras_cluster_gras_pipe_br_registers), 8)); -/* Block: GRAS Cluster: 
A7XX_CLUSTER_GRAS Pipeline: A7XX_PIPE_BV */ +/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: PIPE_BV */ static const u32 gen7_0_0_gras_cluster_gras_pipe_bv_registers[] = { 0x08000, 0x08008, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d, 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa, @@ -323,7 +323,7 @@ static const u32 gen7_0_0_gras_cluster_gras_pipe_bv_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_gras_cluster_gras_pipe_bv_registers), 8)); -/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */ +/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */ static const u32 gen7_0_0_pc_cluster_fe_pipe_br_registers[] = { 0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886, 0x09b00, 0x09b08, @@ -331,7 +331,7 @@ static const u32 gen7_0_0_pc_cluster_fe_pipe_br_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_pc_cluster_fe_pipe_br_registers), 8)); -/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */ +/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */ static const u32 gen7_0_0_pc_cluster_fe_pipe_bv_registers[] = { 0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886, 0x09b00, 0x09b08, @@ -339,7 +339,7 @@ static const u32 gen7_0_0_pc_cluster_fe_pipe_bv_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_pc_cluster_fe_pipe_bv_registers), 8)); -/* Block: RB_RAC Cluster: A7XX_CLUSTER_PS Pipeline: A7XX_PIPE_BR */ +/* Block: RB_RAC Cluster: A7XX_CLUSTER_PS Pipeline: PIPE_BR */ static const u32 gen7_0_0_rb_rac_cluster_ps_pipe_br_registers[] = { 0x08802, 0x08802, 0x08804, 0x08806, 0x08809, 0x0880a, 0x0880e, 0x08811, 0x08818, 0x0881e, 0x08821, 0x08821, 0x08823, 0x08826, 0x08829, 0x08829, @@ -355,7 +355,7 @@ static const u32 gen7_0_0_rb_rac_cluster_ps_pipe_br_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_rb_rac_cluster_ps_pipe_br_registers), 8)); -/* Block: RB_RBP Cluster: A7XX_CLUSTER_PS Pipeline: A7XX_PIPE_BR */ +/* Block: RB_RBP Cluster: A7XX_CLUSTER_PS Pipeline: PIPE_BR */ static const u32 gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers[] = { 0x08800, 0x08801, 0x08803, 0x08803, 0x0880b, 0x0880d, 0x08812, 0x08812, 0x08820, 0x08820, 0x08822, 0x08822, 0x08827, 0x08828, 0x0882a, 0x0882a, @@ -370,7 +370,7 @@ static const u32 gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: HLSQ_STATE */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers[] = { 0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a99e, 0x0a9a7, 0x0a9a7, 0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9b0, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9ba, @@ -381,7 +381,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: HLSQ_STATE */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: HLSQ_STATE */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers[] = { 0x0a9b0, 0x0a9b0, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc, 0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9fc, @@ -390,21 +390,21 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers[] = { }; 
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: HLSQ_DP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: HLSQ_DP */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers[] = { 0x0a9b1, 0x0a9b1, 0x0a9c6, 0x0a9cb, 0x0a9d4, 0x0a9df, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: HLSQ_DP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: HLSQ_DP */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers[] = { 0x0a9b1, 0x0a9b1, 0x0a9d4, 0x0a9df, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: SP_TOP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: SP_TOP */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers[] = { 0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a9a2, 0x0a9a7, 0x0a9a8, 0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9ae, 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, @@ -414,7 +414,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: SP_TOP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: SP_TOP */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers[] = { 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9bc, 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9f9, 0x0aa00, 0x0aa00, 0x0ab00, 0x0ab00, @@ -422,7 +422,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: uSPTP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: uSPTP */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers[] = { 0x0a980, 0x0a982, 0x0a985, 0x0a9a6, 0x0a9a8, 0x0a9a9, 0x0a9ab, 0x0a9ae, 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9bf, 0x0a9c2, 0x0a9c3, @@ -432,7 +432,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: uSPTP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: uSPTP */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers[] = { 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9be, 0x0a9c2, 0x0a9c3, 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa31, 0x0aa31, 0x0ab00, 0x0ab01, @@ -440,7 +440,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: HLSQ_STATE */ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers[] = { 0x0a800, 0x0a800, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824, 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a833, 0x0a835, 
0x0a83a, 0x0a83a, @@ -453,7 +453,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: HLSQ_STATE */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: HLSQ_STATE */ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers[] = { 0x0a800, 0x0a800, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824, 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a833, 0x0a835, 0x0a83a, 0x0a83a, @@ -466,7 +466,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: SP_TOP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: SP_TOP */ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers[] = { 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a830, 0x0a831, 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, 0x0a85c, 0x0a85d, @@ -477,7 +477,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: SP_TOP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: SP_TOP */ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers[] = { 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a830, 0x0a831, 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, 0x0a85c, 0x0a85d, @@ -488,7 +488,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: uSPTP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: uSPTP */ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers[] = { 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a830, 0x0a833, 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, 0x0a863, 0x0a867, @@ -498,7 +498,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: uSPTP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: uSPTP */ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers[] = { 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a830, 0x0a833, 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, 0x0a863, 0x0a867, @@ -508,7 +508,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers), 8)); -/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR */ +/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR */ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers[] = { 0x0b180, 0x0b183, 0x0b190, 0x0b195, 0x0b2c0, 0x0b2d5, 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310, @@ -516,35 +516,35 @@ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers[] = { }; 
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: HLSQ_STATE */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: HLSQ_STATE */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_hlsq_state_registers[] = { 0x0ab00, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_hlsq_state_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: SP_TOP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: SP_TOP */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_sp_top_registers[] = { 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_sp_top_registers), 8)); -/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: uSPTP */ +/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: uSPTP */ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_usptp_registers[] = { 0x0ab00, 0x0ab02, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_usptp_registers), 8)); -/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV */ +/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV */ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_bv_registers[] = { 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_bv_registers), 8)); -/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC */ +/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC */ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers[] = { 0x0b180, 0x0b181, 0x0b300, 0x0b301, 0x0b307, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310, @@ -552,84 +552,84 @@ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers), 8)); -/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR */ +/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR */ static const u32 gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers[] = { 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers), 8)); -/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV */ +/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV */ static const u32 gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers[] = { 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers), 8)); -/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */ +/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */ static const u32 gen7_0_0_vfd_cluster_fe_pipe_br_registers[] = { 0x0a000, 0x0a009, 0x0a00e, 0x0a0ef, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_vfd_cluster_fe_pipe_br_registers), 8)); -/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */ +/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */ static const u32 gen7_0_0_vfd_cluster_fe_pipe_bv_registers[] = { 0x0a000, 0x0a009, 0x0a00e, 0x0a0ef, UINT_MAX, UINT_MAX, }; 
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vfd_cluster_fe_pipe_bv_registers), 8)); -/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */ +/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */ static const u32 gen7_0_0_vpc_cluster_fe_pipe_br_registers[] = { 0x09300, 0x09307, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_fe_pipe_br_registers), 8)); -/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */ +/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */ static const u32 gen7_0_0_vpc_cluster_fe_pipe_bv_registers[] = { 0x09300, 0x09307, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_fe_pipe_bv_registers), 8)); -/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: A7XX_PIPE_BR */ +/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: PIPE_BR */ static const u32 gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers[] = { 0x09101, 0x0910c, 0x09300, 0x09307, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers), 8)); -/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: A7XX_PIPE_BV */ +/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: PIPE_BV */ static const u32 gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers[] = { 0x09101, 0x0910c, 0x09300, 0x09307, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers), 8)); -/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: A7XX_PIPE_BR */ +/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: PIPE_BR */ static const u32 gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers[] = { 0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x09236, 0x09300, 0x09307, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers), 8)); -/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: A7XX_PIPE_BV */ +/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: PIPE_BV */ static const u32 gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers[] = { 0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x09236, 0x09300, 0x09307, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers), 8)); -/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */ +/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: HLSQ_STATE */ static const u32 gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers[] = { 0x0ae52, 0x0ae52, 0x0ae60, 0x0ae67, 0x0ae69, 0x0ae73, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers), 8)); -/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: SP_TOP */ +/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: SP_TOP */ static const u32 gen7_0_0_sp_noncontext_pipe_br_sp_top_registers[] = { 0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae09, 0x0ae0c, 0x0ae0c, 0x0ae0f, 0x0ae0f, 0x0ae28, 0x0ae2b, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3f, @@ -638,7 +638,7 @@ static const u32 gen7_0_0_sp_noncontext_pipe_br_sp_top_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_sp_top_registers), 8)); -/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: uSPTP */ +/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: uSPTP */ static const u32 gen7_0_0_sp_noncontext_pipe_br_usptp_registers[] = { 0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae09, 0x0ae0c, 0x0ae0c, 0x0ae0f, 0x0ae0f, 0x0ae30, 0x0ae32, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3b, @@ -647,28 +647,28 @@ static const u32 
gen7_0_0_sp_noncontext_pipe_br_usptp_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_usptp_registers), 8)); -/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: HLSQ_STATE */ +/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: HLSQ_STATE */ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers[] = { 0x0af88, 0x0af8a, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers), 8)); -/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: SP_TOP */ +/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: SP_TOP */ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers[] = { 0x0af80, 0x0af84, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers), 8)); -/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: uSPTP */ +/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: uSPTP */ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = { 0x0af80, 0x0af84, 0x0af90, 0x0af92, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8)); -/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */ +/* Block: TPl1 Cluster: noncontext Pipeline: PIPE_NONE */ static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = { 0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c, 0x0b60f, 0x0b621, 0x0b630, 0x0b633, @@ -676,14 +676,14 @@ static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8)); -/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */ +/* Block: TPl1 Cluster: noncontext Pipeline: PIPE_BR */ static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = { 0x0b600, 0x0b600, UINT_MAX, UINT_MAX, }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8)); -/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */ +/* Block: TPl1 Cluster: noncontext Pipeline: PIPE_LPAC */ static const u32 gen7_0_0_tpl1_noncontext_pipe_lpac_registers[] = { 0x0b780, 0x0b780, UINT_MAX, UINT_MAX, @@ -691,184 +691,184 @@ static const u32 gen7_0_0_tpl1_noncontext_pipe_lpac_registers[] = { static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_lpac_registers), 8)); static const struct gen7_sel_reg gen7_0_0_rb_rac_sel = { - .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST, - .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, + .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST, + .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, .val = 0x0, }; static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = { - .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST, - .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, + .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST, + .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, .val = 0x9, }; static const struct gen7_cluster_registers gen7_0_0_clusters[] = { - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, gen7_0_0_noncontext_pipe_br_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT, gen7_0_0_noncontext_pipe_bv_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT, gen7_0_0_noncontext_pipe_lpac_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + 
{ A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, gen7_0_0_noncontext_rb_rac_pipe_br_registers, &gen7_0_0_rb_rac_sel, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, gen7_0_0_noncontext_rb_rbp_pipe_br_registers, &gen7_0_0_rb_rbp_sel, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_gras_cluster_gras_pipe_br_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_gras_cluster_gras_pipe_bv_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_gras_cluster_gras_pipe_br_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_gras_cluster_gras_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_pc_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_pc_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_pc_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_pc_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rac_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rac_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rbp_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rbp_sel, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_vfd_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_vfd_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_vfd_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_vfd_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0, 
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, }, }; static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = { - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_0_0_sp_noncontext_pipe_br_sp_top_registers, 0xae00 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_sp_noncontext_pipe_br_usptp_registers, 0xae00 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers, 0xaf80 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP, gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers, 0xaf80 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 }, - { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 }, - { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, 
A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP, gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP, gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, 
A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP, gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE, gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP, gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP, gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP, 
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers, 0xb000 }, - { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 }, - { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h index 772652eb61f3..7897622ea6f7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h @@ -96,87 +96,87 @@ static const u32 gen7_2_0_debugbus_blocks[] = { }; static const struct gen7_shader_block gen7_2_0_shader_blocks[] = { - {A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_INST_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_INST_DATA_1, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_0_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_1_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_2_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_3_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_4_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_5_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_6_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_7_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_CB_RAM, 0x390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_13_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_14_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_INST_TAG, 0xc0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_INST_DATA_2, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_TMO_TAG, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_SMO_TAG, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_STATE_DATA, 0x40, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_HWAVE_RAM, 0x100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_L0_INST_BUF, 0x50, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_8_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_9_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_10_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_11_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_SP_LB_12_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, - {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - 
{A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CPS_RAM, 0x180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM, 0x200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM_1, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM, 0x200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x38, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BACKEND_META, 0x40, 
1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE}, - {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_TP0_TMO_DATA, 0x200, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_TP0_SMO_DATA, 0x80, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_INST_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_INST_DATA_1, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_0_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_1_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_2_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_3_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_4_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_5_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_6_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_7_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_CB_RAM, 0x390, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_13_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_14_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_INST_TAG, 0xc0, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_INST_DATA_2, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_TMO_TAG, 0x80, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_SMO_TAG, 0x80, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_STATE_DATA, 0x40, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_HWAVE_RAM, 0x100, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_L0_INST_BUF, 0x50, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_8_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_9_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_10_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_11_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_SP_LB_12_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP}, + {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CPS_RAM, 0x180, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM, 0x200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM_1, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BV, 
A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM, 0x200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x38, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE}, + {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE}, }; static const u32 gen7_2_0_gpu_registers[] = { @@ -478,182 +478,182 @@ static const u32 gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers[] = { static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers), 8)); static const struct gen7_sel_reg gen7_2_0_rb_rac_sel = { - .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST, - .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, + .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST, + .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, .val = 0x0, }; static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = { - .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST, - .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, + .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST, + .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, .val = 0x9, }; static const struct gen7_cluster_registers gen7_2_0_clusters[] = { - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, gen7_2_0_noncontext_pipe_br_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT, gen7_2_0_noncontext_pipe_bv_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT, gen7_0_0_noncontext_pipe_lpac_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, gen7_2_0_noncontext_rb_rac_pipe_br_registers, &gen7_2_0_rb_rac_sel, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, 
gen7_2_0_noncontext_rb_rbp_pipe_br_registers, &gen7_2_0_rb_rbp_sel, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_2_0_gras_cluster_gras_pipe_br_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_2_0_gras_cluster_gras_pipe_bv_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_2_0_gras_cluster_gras_pipe_br_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_2_0_gras_cluster_gras_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_pc_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_pc_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_pc_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_pc_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_2_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rac_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_2_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rac_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rbp_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rbp_sel, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_vfd_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_vfd_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_vfd_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_vfd_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_fe_pipe_br_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_fe_pipe_bv_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, }, - { 
A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, }, }; static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = { - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_0_0_sp_noncontext_pipe_br_sp_top_registers, 0xae00 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_sp_noncontext_pipe_br_usptp_registers, 0xae00 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE, gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers, 0xaf80 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP, gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers, 0xaf80 }, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 }, - { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 }, - { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_NONE, 0, A7XX_USPTP, gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 }, - { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, 
A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE, gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP, gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP, gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE, gen7_2_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP, gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_2_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, 
A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE, gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP, gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE, gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE, gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP, gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP, gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers, 0xb000 }, - { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 }, - { 
A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 }, - { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 }, - { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP, gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h index 0956dfca1f05..20125d1aa21d 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h @@ -118,97 +118,97 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = { }; static const struct gen7_shader_block gen7_9_0_shader_blocks[] = { - { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_INST_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_INST_DATA_1, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_0_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_1_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_2_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_3_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_4_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_5_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_6_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_7_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_CB_RAM, 0x0390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_13_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_14_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_INST_TAG, 0x00C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_INST_DATA_2, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_TMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_SMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_STATE_DATA, 0x0040, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_HWAVE_RAM, 0x0100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_L0_INST_BUF, 0x0050, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_8_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_9_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_10_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_11_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_SP_LB_12_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, - { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_L2STC_TAG_RAM, 0x0200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_L2STC_INFO_CMD, 0x0474, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { 
A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CPS_MISC_RAM, 0x0640, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CPS_MISC_RAM, 0x00B0, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CPS_MISC_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INST_RAM, 0x0200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INST_RAM_TAG, 0x0014, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INST_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INST_RAM_TAG, 0x0004, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0020, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x03C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0050, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INST_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_STPROC_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BR, 
A7XX_HLSQ_STATE }, - { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INST_RAM_2, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_DATAPATH_META, 0x0020, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_INDIRECT_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, - { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_INST_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_INST_DATA_1, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_0_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_1_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_2_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_3_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_4_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_5_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_6_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_7_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_CB_RAM, 0x0390, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_13_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_14_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_INST_TAG, 0x00C0, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_INST_DATA_2, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_TMO_TAG, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_SMO_TAG, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_STATE_DATA, 0x0040, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_HWAVE_RAM, 0x0100, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_L0_INST_BUF, 0x0050, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_8_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_9_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_10_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_11_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_12_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP }, + { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_L2STC_TAG_RAM, 0x0200, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_L2STC_INFO_CMD, 0x0474, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0180, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { 
A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM, 0x0640, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM, 0x00B0, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM_1, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM, 0x0200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0008, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_TAG, 0x0014, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_TAG, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_TAG, 0x0004, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0020, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x03C0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0050, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0008, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_1, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_STPROC_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_2, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_DATAPATH_META, 0x0020, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INDIRECT_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BACKEND_META, 
0x0040, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE }, }; /* @@ -226,7 +226,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pre_crashdumper_gpu_registers), 8)); * Block : ['BROADCAST', 'CP', 'GRAS', 'GXCLKCTL'] * Block : ['PC', 'RBBM', 'RDVM', 'UCHE'] * Block : ['VFD', 'VPC', 'VSC'] - * Pipeline: A7XX_PIPE_NONE + * Pipeline: PIPE_NONE * pairs : 196 (Regs:1778) */ static const u32 gen7_9_0_gpu_registers[] = { @@ -290,7 +290,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gxclkctl_registers), 8)); /* * Block : ['GMUAO', 'GMUCX', 'GMUCX_RAM'] - * Pipeline: A7XX_PIPE_NONE + * Pipeline: PIPE_NONE * pairs : 134 (Regs:429) */ static const u32 gen7_9_0_gmu_registers[] = { @@ -334,7 +334,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmu_registers), 8)); /* * Block : ['GMUGX'] - * Pipeline: A7XX_PIPE_NONE + * Pipeline: PIPE_NONE * pairs : 44 (Regs:454) */ static const u32 gen7_9_0_gmugx_registers[] = { @@ -355,7 +355,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmugx_registers), 8)); /* * Block : ['CX_MISC'] - * Pipeline: A7XX_PIPE_NONE + * Pipeline: PIPE_NONE * pairs : 7 (Regs:56) */ static const u32 gen7_9_0_cx_misc_registers[] = { @@ -367,7 +367,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_misc_registers), 8)); /* * Block : ['DBGC'] - * Pipeline: A7XX_PIPE_NONE + * Pipeline: PIPE_NONE * pairs : 19 (Regs:155) */ static const u32 gen7_9_0_dbgc_registers[] = { @@ -382,7 +382,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_dbgc_registers), 8)); /* * Block : ['CX_DBGC'] - * Pipeline: A7XX_PIPE_NONE + * Pipeline: PIPE_NONE * pairs : 7 (Regs:75) */ static const u32 gen7_9_0_cx_dbgc_registers[] = { @@ -396,7 +396,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_dbgc_registers), 8)); * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF'] * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC'] * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_NONE * pairs : 29 (Regs:573) */ @@ -417,7 +417,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_br_registers), 8)); * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF'] * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC'] * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_NONE * pairs : 29 (Regs:573) */ @@ -438,7 +438,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_bv_registers), 8)); * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF'] * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC'] * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_NONE * pairs : 2 (Regs:7) */ @@ -450,7 +450,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_lpac_registers), 8)); /* * Block : ['RB'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_NONE * pairs : 5 (Regs:37) */ @@ -463,7 +463,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rac_registers), /* * Block : ['RB'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_NONE * pairs : 15 (Regs:66) */ @@ -478,7 +478,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rbp_registers), /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_HLSQ_STATE * pairs : 4 (Regs:28) @@ -491,7 +491,7 @@ 
static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_state_regis /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_SP_TOP * pairs : 10 (Regs:61) @@ -506,7 +506,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_sp_top_registers /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_USPTP * pairs : 12 (Regs:62) @@ -521,7 +521,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_usptp_registers) /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_HLSQ_DP_STR * pairs : 2 (Regs:5) @@ -534,7 +534,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_regi /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_HLSQ_STATE * pairs : 1 (Regs:5) @@ -547,7 +547,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_reg /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_SP_TOP * pairs : 1 (Regs:6) @@ -560,7 +560,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_sp_top_registe /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_USPTP * pairs : 2 (Regs:9) @@ -573,7 +573,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_usptp_register /* * Block : ['TPL1'] - * Pipeline: A7XX_PIPE_NONE + * Pipeline: PIPE_NONE * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_USPTP * pairs : 5 (Regs:29) @@ -587,7 +587,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_none_usptp_regist /* * Block : ['TPL1'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_USPTP * pairs : 1 (Regs:1) @@ -600,7 +600,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_br_usptp_register /* * Block : ['TPL1'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_NONE * Location: A7XX_USPTP * pairs : 1 (Regs:1) @@ -613,7 +613,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_lpac_usptp_regist /* * Block : ['GRAS'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_GRAS * pairs : 14 (Regs:293) */ @@ -628,7 +628,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_br_cluster_gras_registers), 8 /* * Block : ['GRAS'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_GRAS * pairs : 14 (Regs:293) */ @@ -643,7 +643,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_bv_cluster_gras_registers), 8 /* * Block : ['PC'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_FE * pairs : 6 (Regs:31) */ @@ -656,7 +656,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_br_cluster_fe_registers), 8)); /* * Block : ['PC'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_FE * pairs : 6 (Regs:31) */ @@ -669,7 +669,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_bv_cluster_fe_registers), 8)); /* * Block : ['VFD'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_FE * pairs : 2 (Regs:236) */ @@ -681,7 +681,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_br_cluster_fe_registers), 8)); /* * Block : ['VFD'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_FE * pairs 
: 2 (Regs:236) */ @@ -693,7 +693,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_bv_cluster_fe_registers), 8)); /* * Block : ['VPC'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_FE * pairs : 2 (Regs:18) */ @@ -705,7 +705,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_fe_registers), 8)); /* * Block : ['VPC'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_PC_VS * pairs : 3 (Regs:30) */ @@ -717,7 +717,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers), 8 /* * Block : ['VPC'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_VPC_PS * pairs : 5 (Regs:76) */ @@ -730,7 +730,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers), /* * Block : ['VPC'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_FE * pairs : 2 (Regs:18) */ @@ -742,7 +742,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_fe_registers), 8)); /* * Block : ['VPC'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_PC_VS * pairs : 3 (Regs:30) */ @@ -754,7 +754,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers), 8 /* * Block : ['VPC'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_VPC_PS * pairs : 5 (Regs:76) */ @@ -767,7 +767,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers), /* * Block : ['RB'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_PS * pairs : 39 (Regs:133) */ @@ -788,7 +788,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rac_registers), 8 /* * Block : ['RB'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_PS * pairs : 34 (Regs:100) */ @@ -808,7 +808,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers), 8 /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_VS * Location: A7XX_HLSQ_STATE * pairs : 29 (Regs:215) @@ -828,7 +828,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_reg /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_VS * Location: A7XX_SP_TOP * pairs : 22 (Regs:73) @@ -846,7 +846,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registe /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_VS * Location: A7XX_USPTP * pairs : 16 (Regs:269) @@ -862,7 +862,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_register /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_HLSQ_STATE * pairs : 21 (Regs:334) @@ -880,7 +880,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_reg /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_HLSQ_DP * pairs : 3 (Regs:19) @@ -893,7 +893,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_regist /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_SP_TOP * pairs : 18 (Regs:77) @@ -910,7 +910,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registe /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_USPTP * pairs 
: 17 (Regs:333) @@ -927,7 +927,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_register /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_HLSQ_DP_STR * pairs : 1 (Regs:6) @@ -940,7 +940,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_re /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_SP_VS * Location: A7XX_HLSQ_STATE * pairs : 28 (Regs:213) @@ -959,7 +959,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_reg /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_SP_VS * Location: A7XX_SP_TOP * pairs : 21 (Regs:71) @@ -977,7 +977,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registe /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_SP_VS * Location: A7XX_USPTP * pairs : 16 (Regs:266) @@ -993,7 +993,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_register /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_HLSQ_STATE * pairs : 14 (Regs:299) @@ -1009,7 +1009,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_r /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_HLSQ_DP * pairs : 2 (Regs:13) @@ -1022,7 +1022,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_regi /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_SP_TOP * pairs : 9 (Regs:34) @@ -1037,7 +1037,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_regis /* * Block : ['SP'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_USPTP * pairs : 11 (Regs:279) @@ -1052,7 +1052,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_regist /* * Block : ['TPL1'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_VS * Location: A7XX_USPTP * pairs : 3 (Regs:10) @@ -1065,7 +1065,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_regist /* * Block : ['TPL1'] - * Pipeline: A7XX_PIPE_BR + * Pipeline: PIPE_BR * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_USPTP * pairs : 6 (Regs:42) @@ -1079,7 +1079,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_regist /* * Block : ['TPL1'] - * Pipeline: A7XX_PIPE_BV + * Pipeline: PIPE_BV * Cluster : A7XX_CLUSTER_SP_VS * Location: A7XX_USPTP * pairs : 3 (Regs:10) @@ -1092,7 +1092,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_regist /* * Block : ['TPL1'] - * Pipeline: A7XX_PIPE_LPAC + * Pipeline: PIPE_LPAC * Cluster : A7XX_CLUSTER_SP_PS * Location: A7XX_USPTP * pairs : 5 (Regs:7) @@ -1105,192 +1105,192 @@ static const u32 gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers[] = { static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers), 8)); static const struct gen7_sel_reg gen7_9_0_rb_rac_sel = { - .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST, - .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, + .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST, + .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, .val = 0, }; static const struct gen7_sel_reg 
gen7_9_0_rb_rbp_sel = { - .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST, - .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, + .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST, + .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, .val = 0x9, }; static const struct gen7_cluster_registers gen7_9_0_clusters[] = { - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, gen7_9_0_non_context_pipe_br_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT, gen7_9_0_non_context_pipe_bv_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT, gen7_9_0_non_context_pipe_lpac_registers, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, gen7_9_0_non_context_rb_pipe_br_rac_registers, &gen7_9_0_rb_rac_sel, }, - { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT, gen7_9_0_non_context_rb_pipe_br_rbp_registers, &gen7_9_0_rb_rbp_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, }, - { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_9_0_gras_pipe_br_cluster_gras_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_9_0_gras_pipe_br_cluster_gras_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_9_0_gras_pipe_bv_cluster_gras_registers, }, - { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_9_0_gras_pipe_bv_cluster_gras_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_9_0_pc_pipe_br_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_9_0_pc_pipe_br_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_9_0_pc_pipe_bv_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_9_0_pc_pipe_bv_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_9_0_vfd_pipe_br_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_9_0_vfd_pipe_br_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_9_0_vfd_pipe_bv_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, 
STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_9_0_vfd_pipe_bv_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0, gen7_9_0_vpc_pipe_br_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1, gen7_9_0_vpc_pipe_br_cluster_fe_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0, gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1, gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0, gen7_9_0_vpc_pipe_bv_cluster_fe_registers, }, - { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1, gen7_9_0_vpc_pipe_bv_cluster_fe_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, }, - { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0, gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, }, - { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1, gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, }, }; static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = { - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00}, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_9_0_non_context_sp_pipe_br_sp_top_registers, 0xae00}, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP, gen7_9_0_non_context_sp_pipe_br_usptp_registers, 0xae00}, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_DP_STR, gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers, 0xae00}, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE, gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers, 0xaf80}, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP, gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers, 0xaf80}, - { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_9_0_non_context_sp_pipe_lpac_usptp_registers, 0xaf80}, - { A7XX_CLUSTER_NONE, 
A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_NONE, 0, A7XX_USPTP, gen7_9_0_non_context_tpl1_pipe_none_usptp_registers, 0xb600}, - { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP, gen7_9_0_non_context_tpl1_pipe_br_usptp_registers, 0xb600}, - { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers, 0xb780}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE, gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP, gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP, gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE, gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP, gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE, gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP, gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP, gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, 
A7XX_HLSQ_DP, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP, gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP_STR, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE, gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP, gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP, gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP, gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP_STR, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP_STR, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP, gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP, gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP_STR, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP_STR, 
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP, gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP, gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP_STR, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP_STR, gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP, gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000}, - { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP, gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP, gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP, gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers, 0xb000}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP, gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP, gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, - { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP, gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 4b5a4edd0702..1c80909e63ca 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -284,6 +284,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, struct adreno_smmu_fault_info *info, const char *block, u32 scratch[4]) { + struct adreno_gpu *adreno_gpu = container_of(gpu, struct adreno_gpu, base); struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu; const char *type = "UNKNOWN"; @@ -336,6 +337,11 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long 
iova, int flags, /* Turn off the hangcheck timer to keep it from bothering us */ timer_delete(&gpu->hangcheck_timer); + /* Let any concurrent GMU transactions know that the MMU may be + * blocked for a while and they should wait on us. + */ + reinit_completion(&adreno_gpu->fault_coredump_done); + fault_info.ttbr0 = info->ttbr0; fault_info.iova = iova; fault_info.flags = flags; @@ -343,6 +349,8 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, fault_info.block = block; msm_gpu_fault_crashstate_capture(gpu, &fault_info); + + complete_all(&adreno_gpu->fault_coredump_done); } return 0; @@ -1189,6 +1197,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, /* Only handle the core clock when GMU is not in use (or is absent). */ if (adreno_has_gmu_wrapper(adreno_gpu) || + adreno_has_rgmu(adreno_gpu) || adreno_gpu->info->family < ADRENO_6XX_GEN1) { /* * This can only be done before devm_pm_opp_of_add_table(), or @@ -1222,6 +1231,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (ret) return ret; + init_completion(&adreno_gpu->fault_coredump_done); + complete_all(&adreno_gpu->fault_coredump_done); + pm_runtime_set_autosuspend_delay(dev, adreno_gpu->info->inactive_period); pm_runtime_use_autosuspend(dev); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 390fa6720d9b..0f8d3de97636 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -27,6 +27,7 @@ enum { ADRENO_FW_PFP = 1, ADRENO_FW_GMU = 1, /* a6xx */ ADRENO_FW_GPMU = 2, + ADRENO_FW_AQE = 3, ADRENO_FW_MAX, }; @@ -50,6 +51,8 @@ enum adreno_family { ADRENO_7XX_GEN1, /* a730 family */ ADRENO_7XX_GEN2, /* a740 family */ ADRENO_7XX_GEN3, /* a750 family */ + ADRENO_8XX_GEN1, /* a830 family */ + ADRENO_8XX_GEN2, /* a840 family */ }; #define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0) @@ -71,9 +74,14 @@ enum adreno_family { (((_c) >> 8) & 0xff), \ ((_c) & 0xff) +struct adreno_gpu; + struct adreno_gpu_funcs { struct msm_gpu_funcs base; + struct msm_gpu *(*init)(struct drm_device *dev); int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value); + void (*bus_halt)(struct adreno_gpu *adreno_gpu, bool gx_off); + int (*mmu_fault_handler)(void *arg, unsigned long iova, int flags, void *data); }; struct adreno_reglist { @@ -81,6 +89,13 @@ struct adreno_reglist { u32 value; }; +/* Reglist with pipe information */ +struct adreno_reglist_pipe { + u32 offset; + u32 value; + u32 pipe; +}; + struct adreno_speedbin { uint16_t fuse; uint16_t speedbin; @@ -101,7 +116,7 @@ struct adreno_info { const char *fw[ADRENO_FW_MAX]; uint32_t gmem; u64 quirks; - struct msm_gpu *(*init)(struct drm_device *dev); + const struct adreno_gpu_funcs *funcs; const char *zapfw; u32 inactive_period; union { @@ -180,6 +195,8 @@ struct adreno_gpu { uint16_t speedbin; const struct adreno_gpu_funcs *funcs; + struct completion fault_coredump_done; + /* interesting register offsets to dump: */ const unsigned int *registers; @@ -392,6 +409,16 @@ static inline int adreno_is_a610(const struct adreno_gpu *gpu) return adreno_is_revn(gpu, 610); } +static inline int adreno_is_a612(const struct adreno_gpu *gpu) +{ + return gpu->info->chip_ids[0] == 0x06010200; +} + +static inline bool adreno_has_rgmu(const struct adreno_gpu *gpu) +{ + return adreno_is_a612(gpu); +} + static inline int adreno_is_a618(const struct adreno_gpu *gpu) { return adreno_is_revn(gpu, 618); @@ -466,9 +493,9 @@ static inline int adreno_is_a610_family(const 
struct adreno_gpu *gpu) { if (WARN_ON_ONCE(!gpu->info)) return false; - - /* TODO: A612 */ - return adreno_is_a610(gpu) || adreno_is_a702(gpu); + return adreno_is_a610(gpu) || + adreno_is_a612(gpu) || + adreno_is_a702(gpu); } /* TODO: 615/616 */ @@ -548,6 +575,21 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu) adreno_is_a740_family(gpu); } +static inline int adreno_is_a8xx(struct adreno_gpu *gpu) +{ + return gpu->info->family >= ADRENO_8XX_GEN1; +} + +static inline int adreno_is_x285(struct adreno_gpu *gpu) +{ + return gpu->info->chip_ids[0] == 0x44070001; +} + +static inline int adreno_is_a840(struct adreno_gpu *gpu) +{ + return gpu->info->chip_ids[0] == 0x44050a01; +} + /* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */ #define ADRENO_VM_START 0x100000000ULL u64 adreno_private_vm_size(struct msm_gpu *gpu); @@ -673,12 +715,6 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) OUT_RING(ring, PKT7(opcode, cnt)); } -struct msm_gpu *a2xx_gpu_init(struct drm_device *dev); -struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); -struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); -struct msm_gpu *a5xx_gpu_init(struct drm_device *dev); -struct msm_gpu *a6xx_gpu_init(struct drm_device *dev); - static inline uint32_t get_wptr(struct msm_ringbuffer *ring) { return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2); diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h new file mode 100644 index 000000000000..13bb43ba67d3 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h @@ -0,0 +1,541 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2025 Linaro Limited + */ + +#ifndef _DPU_12_2_GLYMUR_H +#define _DPU_12_2_GLYMUR_H + +static const struct dpu_caps glymur_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 8192, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_mdp_cfg glymur_mdp = { + .name = "top_0", + .base = 0, .len = 0x494, + .clk_ctrls = { + [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, + }, +}; + +static const struct dpu_ctl_cfg glymur_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x15000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x16000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x17000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x18000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x19000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, { + .name = "ctl_5", .id = CTL_5, + .base = 0x1a000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23), + }, { + .name = "ctl_6", .id = CTL_6, + .base = 0x1b000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 14), + }, { + .name = "ctl_7", .id = CTL_7, + .base = 0x1c000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 15), + }, +}; + +static const struct dpu_sspp_cfg glymur_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = 
&dpu_vig_sblk_qseed3_3_4, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_4, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_4, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_4, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 1, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 5, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_10", .id = SSPP_DMA2, + .base = 0x28000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 9, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_11", .id = SSPP_DMA3, + .base = 0x2a000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 13, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_12", .id = SSPP_DMA4, + .base = 0x2c000, .len = 0x344, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 14, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_13", .id = SSPP_DMA5, + .base = 0x2e000, .len = 0x344, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 15, + .type = SSPP_TYPE_DMA, + }, +}; + +static const struct dpu_lm_cfg glymur_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_3, + .pingpong = PINGPONG_2, + .dspp = DSPP_2, + }, { + .name = "lm_3", .id = LM_3, + .base = 0x47000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + .dspp = DSPP_3, + }, { + .name = "lm_4", .id = LM_4, + .base = 0x48000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_4, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_4, + .pingpong = PINGPONG_5, + }, { + .name = "lm_6", .id = LM_6, + .base = 0x4a000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_7, + .pingpong = PINGPONG_6, + }, { + .name = "lm_7", .id = LM_7, + .base = 0x4b000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_6, + .pingpong = PINGPONG_7, + }, +}; + +static const struct dpu_dspp_cfg glymur_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_2", .id 
= DSPP_2, + .base = 0x58000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_3", .id = DSPP_3, + .base = 0x5a000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_4", .id = DSPP_4, + .base = 0x5c000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_5", .id = DSPP_5, + .base = 0x5e000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_6", .id = DSPP_6, + .base = 0x60000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_7", .id = DSPP_7, + .base = 0x62000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, +}; + +static const struct dpu_pingpong_cfg glymur_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x69000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x6a000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x6b000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + }, { + .name = "pingpong_3", .id = PINGPONG_3, + .base = 0x6c000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + }, { + .name = "pingpong_4", .id = PINGPONG_4, + .base = 0x6d000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + }, { + .name = "pingpong_5", .id = PINGPONG_5, + .base = 0x6e000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), + }, { + .name = "pingpong_6", .id = PINGPONG_6, + .base = 0x6f000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 20), + }, { + .name = "pingpong_7", .id = PINGPONG_7, + .base = 0x70000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 21), + }, { + .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0, + .base = 0x66000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_4, + }, { + .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1, + .base = 0x66400, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_4, + }, +}; + +static const struct dpu_merge_3d_cfg glymur_merge_3d[] = { + { + .name = "merge_3d_0", .id = MERGE_3D_0, + .base = 0x4e000, .len = 0x1c, + }, { + .name = "merge_3d_1", .id = MERGE_3D_1, + .base = 0x4f000, .len = 0x1c, + }, { + .name = "merge_3d_2", .id = MERGE_3D_2, + .base = 0x50000, .len = 0x1c, + }, { + .name = "merge_3d_3", .id = MERGE_3D_3, + .base = 0x51000, .len = 0x1c, + }, +}; + +/* + * NOTE: Each display compression engine (DCE) contains dual hard + * slice DSC encoders so both share same base address but with + * its own different sub block address. 
+ */ +static const struct dpu_dsc_cfg glymur_dsc[] = { + { + .name = "dce_0_0", .id = DSC_0, + .base = 0x80000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_0, + }, { + .name = "dce_0_1", .id = DSC_1, + .base = 0x80000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_1, + }, { + .name = "dce_1_0", .id = DSC_2, + .base = 0x81000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_0, + }, { + .name = "dce_1_1", .id = DSC_3, + .base = 0x81000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_1, + }, { + .name = "dce_2_0", .id = DSC_4, + .base = 0x82000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_0, + }, { + .name = "dce_2_1", .id = DSC_5, + .base = 0x82000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_1, + }, { + .name = "dce_3_0", .id = DSC_6, + .base = 0x83000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_0, + }, { + .name = "dce_3_1", .id = DSC_7, + .base = 0x83000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_1, + }, + +}; + +static const struct dpu_wb_cfg glymur_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SDM845_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + +static const struct dpu_cwb_cfg glymur_cwb[] = { + { + .name = "cwb_0", .id = CWB_0, + .base = 0x66200, .len = 0x20, + }, + { + .name = "cwb_1", .id = CWB_1, + .base = 0x66600, .len = 0x20, + }, + { + .name = "cwb_2", .id = CWB_2, + .base = 0x7e200, .len = 0x20, + }, + { + .name = "cwb_3", .id = CWB_3, + .base = 0x7e600, .len = 0x20, + }, +}; + +static const struct dpu_intf_cfg glymur_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x34000, .len = 0x400, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x35000, .len = 0x400, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2), + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x36000, .len = 0x400, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2), + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x37000, .len = 0x400, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + }, { + .name = "intf_4", .id = INTF_4, + .base = 0x38000, .len = 0x400, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21), + }, { + .name = "intf_5", 
.id = INTF_5, + .base = 0x39000, .len = 0x400, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_3, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23), + }, { + .name = "intf_6", .id = INTF_6, + .base = 0x3A000, .len = 0x400, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_2, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17), + }, { + .name = "intf_7", .id = INTF_7, + .base = 0x3b000, .len = 0x400, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19), + }, { + .name = "intf_8", .id = INTF_8, + .base = 0x3c000, .len = 0x400, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_perf_cfg glymur_perf_data = { + .max_bw_low = 18900000, + .max_bw_high = 28500000, + .min_core_ib = 2500000, + .min_llcc_ib = 0, + .min_dram_ib = 800000, + .min_prefill_lines = 35, + .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0}, + .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc7180_qos_linear), + .entries = sc7180_qos_linear + }, + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version glymur_mdss_ver = { + .core_major_ver = 12, + .core_minor_ver = 2, +}; + +const struct dpu_mdss_cfg dpu_glymur_cfg = { + .mdss_ver = &glymur_mdss_ver, + .caps = &glymur_dpu_caps, + .mdp = &glymur_mdp, + .cdm = &dpu_cdm_5_x, + .ctl_count = ARRAY_SIZE(glymur_ctl), + .ctl = glymur_ctl, + .sspp_count = ARRAY_SIZE(glymur_sspp), + .sspp = glymur_sspp, + .mixer_count = ARRAY_SIZE(glymur_lm), + .mixer = glymur_lm, + .dspp_count = ARRAY_SIZE(glymur_dspp), + .dspp = glymur_dspp, + .pingpong_count = ARRAY_SIZE(glymur_pp), + .pingpong = glymur_pp, + .dsc_count = ARRAY_SIZE(glymur_dsc), + .dsc = glymur_dsc, + .merge_3d_count = ARRAY_SIZE(glymur_merge_3d), + .merge_3d = glymur_merge_3d, + .wb_count = ARRAY_SIZE(glymur_wb), + .wb = glymur_wb, + .cwb_count = ARRAY_SIZE(glymur_cwb), + .cwb = sm8650_cwb, + .intf_count = ARRAY_SIZE(glymur_intf), + .intf = glymur_intf, + .vbif_count = ARRAY_SIZE(sm8650_vbif), + .vbif = sm8650_vbif, + .perf = &glymur_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 2f8156051d9b..c39f1908ea65 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -200,7 +200,7 @@ static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc, struct dpu_crtc_state *crtc_state) { struct dpu_crtc_mixer *m; - u32 crcs[CRTC_DUAL_MIXERS]; + u32 crcs[CRTC_QUAD_MIXERS]; int rc = 0; int i; @@ -400,7 +400,7 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) static void _dpu_crtc_blend_setup_pipe(struct drm_crtc 
*crtc, struct drm_plane *plane, struct dpu_crtc_mixer *mixer, - u32 num_mixers, + u32 lms_in_pair, enum dpu_stage stage, const struct msm_format *format, uint64_t modifier, @@ -419,7 +419,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc, trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane), state, to_dpu_plane_state(state), stage_idx, - format->pixel_format, + format->pixel_format, pipe, modifier); DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n", @@ -434,7 +434,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc, stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index; /* blend config update */ - for (lm_idx = 0; lm_idx < num_mixers; lm_idx++) + for (lm_idx = 0; lm_idx < lms_in_pair; lm_idx++) mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx); } @@ -449,7 +449,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, struct dpu_plane_state *pstate = NULL; const struct msm_format *format; struct dpu_hw_ctl *ctl = mixer->lm_ctl; - u32 lm_idx; + u32 lm_idx, stage, i, pipe_idx, head_pipe_in_stage, lms_in_pair; bool bg_alpha_enable = false; DECLARE_BITMAP(active_fetch, SSPP_MAX); DECLARE_BITMAP(active_pipes, SSPP_MAX); @@ -472,22 +472,25 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) bg_alpha_enable = true; - set_bit(pstate->pipe.sspp->idx, active_fetch); - set_bit(pstate->pipe.sspp->idx, active_pipes); - _dpu_crtc_blend_setup_pipe(crtc, plane, - mixer, cstate->num_mixers, - pstate->stage, - format, fb ? fb->modifier : 0, - &pstate->pipe, 0, stage_cfg); - - if (pstate->r_pipe.sspp) { - set_bit(pstate->r_pipe.sspp->idx, active_fetch); - set_bit(pstate->r_pipe.sspp->idx, active_pipes); - _dpu_crtc_blend_setup_pipe(crtc, plane, - mixer, cstate->num_mixers, - pstate->stage, - format, fb ? fb->modifier : 0, - &pstate->r_pipe, 1, stage_cfg); + /* loop pipe per mixer pair with config in stage structure */ + for (stage = 0; stage < STAGES_PER_PLANE; stage++) { + head_pipe_in_stage = stage * PIPES_PER_STAGE; + for (i = 0; i < PIPES_PER_STAGE; i++) { + pipe_idx = i + head_pipe_in_stage; + if (!pstate->pipe[pipe_idx].sspp) + continue; + lms_in_pair = min(cstate->num_mixers - (stage * PIPES_PER_STAGE), + PIPES_PER_STAGE); + set_bit(pstate->pipe[pipe_idx].sspp->idx, active_fetch); + set_bit(pstate->pipe[pipe_idx].sspp->idx, active_pipes); + _dpu_crtc_blend_setup_pipe(crtc, plane, + &mixer[head_pipe_in_stage], + lms_in_pair, + pstate->stage, + format, fb ? 
fb->modifier : 0, + &pstate->pipe[pipe_idx], i, + &stage_cfg[stage]); + } } /* blend config update */ @@ -523,7 +526,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) struct dpu_crtc_mixer *mixer = cstate->mixers; struct dpu_hw_ctl *ctl; struct dpu_hw_mixer *lm; - struct dpu_hw_stage_cfg stage_cfg; + struct dpu_hw_stage_cfg stage_cfg[STAGES_PER_PLANE]; DECLARE_BITMAP(active_lms, LM_MAX); int i; @@ -544,10 +547,10 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) } /* initialize stage cfg */ - memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg)); + memset(&stage_cfg, 0, sizeof(stage_cfg)); memset(active_lms, 0, sizeof(active_lms)); - _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg); + _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, stage_cfg); for (i = 0; i < cstate->num_mixers; i++) { ctl = mixer[i].lm_ctl; @@ -568,13 +571,17 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) mixer[i].mixer_op_mode, ctl->idx - CTL_0); + /* + * call dpu_hw_ctl_setup_blendstage() to blend layers per stage cfg. + * stage data is shared between PIPES_PER_STAGE pipes. + */ if (ctl->ops.setup_blendstage) ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, - &stage_cfg); + &stage_cfg[i / PIPES_PER_STAGE]); if (lm->ops.setup_blendstage) lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx, - &stage_cfg); + &stage_cfg[i / PIPES_PER_STAGE]); } } @@ -1310,7 +1317,7 @@ done: return ret; } -#define MAX_CHANNELS_PER_CRTC 2 +#define MAX_CHANNELS_PER_CRTC PIPES_PER_PLANE #define MAX_HDISPLAY_SPLIT 1080 static struct msm_display_topology dpu_crtc_get_topology( @@ -1321,6 +1328,7 @@ static struct msm_display_topology dpu_crtc_get_topology( struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct msm_display_topology topology = {0}; struct drm_encoder *drm_enc; + u32 num_rt_intf; drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state, @@ -1334,11 +1342,14 @@ static struct msm_display_topology dpu_crtc_get_topology( * Dual display * 2 LM, 2 INTF ( Split display using 2 interfaces) * + * If DSC is enabled, try to use 4:4:2 topology if there is enough + * resource. Otherwise, use 2:2:2 topology. + * * Single display * 1 LM, 1 INTF * 2 LM, 1 INTF (stream merge to support high resolution interfaces) * - * If DSC is enabled, use 2 LMs for 2:2:1 topology + * If DSC is enabled, use 2:2:1 topology * * Add dspps to the reservation requirements if ctm is requested * @@ -1350,14 +1361,23 @@ static struct msm_display_topology dpu_crtc_get_topology( * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check. */ - if (topology.num_intf == 2 && !topology.cwb_enabled) - topology.num_lm = 2; - else if (topology.num_dsc == 2) + num_rt_intf = topology.num_intf; + if (topology.cwb_enabled) + num_rt_intf--; + + if (topology.num_dsc) { + if (dpu_kms->catalog->dsc_count >= num_rt_intf * 2) + topology.num_dsc = num_rt_intf * 2; + else + topology.num_dsc = num_rt_intf; + topology.num_lm = topology.num_dsc; + } else if (num_rt_intf == 2) { topology.num_lm = 2; - else if (dpu_kms->catalog->caps->has_3d_merge) + } else if (dpu_kms->catalog->caps->has_3d_merge) { topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 
2 : 1; - else + } else { topology.num_lm = 1; + } if (crtc_state->ctm) topology.num_dspp = topology.num_lm; @@ -1600,6 +1620,17 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) return 0; } +/** + * dpu_crtc_get_num_lm - Get mixer number in this CRTC pipeline + * @state: Pointer to drm crtc state object + */ +unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state) +{ + struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); + + return cstate->num_mixers; +} + #ifdef CONFIG_DEBUG_FS static int _dpu_debugfs_status_show(struct seq_file *s, void *data) { @@ -1682,15 +1713,15 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data) seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n", state->crtc_x, state->crtc_y, state->crtc_w, state->crtc_h); - seq_printf(s, "\tsspp[0]:%s\n", - pstate->pipe.sspp->cap->name); - seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n", - pstate->pipe.multirect_mode, pstate->pipe.multirect_index); - if (pstate->r_pipe.sspp) { - seq_printf(s, "\tsspp[1]:%s\n", - pstate->r_pipe.sspp->cap->name); - seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n", - pstate->r_pipe.multirect_mode, pstate->r_pipe.multirect_index); + + for (i = 0; i < PIPES_PER_PLANE; i++) { + if (!pstate->pipe[i].sspp) + continue; + seq_printf(s, "\tsspp[%d]:%s\n", + i, pstate->pipe[i].sspp->cap->name); + seq_printf(s, "\tmultirect[%d]: mode: %d index: %d\n", + i, pstate->pipe[i].multirect_mode, + pstate->pipe[i].multirect_index); } seq_puts(s, "\n"); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index 94392b9b9245..455073c7025b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -210,7 +210,7 @@ struct dpu_crtc_state { bool bw_control; bool bw_split_vote; - struct drm_rect lm_bounds[CRTC_DUAL_MIXERS]; + struct drm_rect lm_bounds[CRTC_QUAD_MIXERS]; uint64_t input_fence_timeout_ns; @@ -218,10 +218,10 @@ struct dpu_crtc_state { /* HW Resources reserved for the crtc */ u32 num_mixers; - struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS]; + struct dpu_crtc_mixer mixers[CRTC_QUAD_MIXERS]; u32 num_ctls; - struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS]; + struct dpu_hw_ctl *hw_ctls[CRTC_QUAD_MIXERS]; enum dpu_crtc_crc_source crc_source; int crc_frame_skip_count; @@ -267,4 +267,6 @@ static inline enum dpu_crtc_client_type dpu_crtc_get_client_type( void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event); +unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state); + #endif /* _DPU_CRTC_H_ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 258edaa18fc0..d1cfe81a3373 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -55,7 +55,8 @@ #define MAX_PHYS_ENCODERS_PER_VIRTUAL \ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES) -#define MAX_CHANNELS_PER_ENC 2 +#define MAX_CHANNELS_PER_ENC 4 +#define MAX_CWB_PER_ENC 2 #define IDLE_SHORT_TIMEOUT 1 @@ -182,7 +183,7 @@ struct dpu_encoder_virt { struct dpu_encoder_phys *cur_master; struct dpu_encoder_phys *cur_slave; struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_cwb *hw_cwb[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_cwb *hw_cwb[MAX_CWB_PER_ENC]; struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; unsigned int dsc_mask; @@ -660,7 +661,6 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc, struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); struct msm_drm_private 
*priv = dpu_enc->base.dev->dev_private; struct msm_display_info *disp_info = &dpu_enc->disp_info; - struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); struct drm_connector *connector; struct drm_connector_state *conn_state; struct drm_framebuffer *fb; @@ -674,22 +674,12 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc, dsc = dpu_encoder_get_dsc_config(drm_enc); - /* We only support 2 DSC mode (with 2 LM and 1 INTF) */ - if (dsc) { - /* - * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces - * when Display Stream Compression (DSC) is enabled, - * and when enough DSC blocks are available. - * This is power-optimal and can drive up to (including) 4k - * screens. - */ - WARN(topology->num_intf > 2, - "DSC topology cannot support more than 2 interfaces\n"); - if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2) - topology->num_dsc = 2; - else - topology->num_dsc = 1; - } + /* + * Set DSC number as 1 to mark the enabled status, will be adjusted + * in dpu_crtc_get_topology() + */ + if (dsc) + topology->num_dsc = 1; connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); if (!connector) @@ -1160,7 +1150,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC]; - int num_ctl, num_pp, num_dsc; + int num_ctl, num_pp, num_dsc, num_pp_per_intf; int num_cwb = 0; bool is_cwb_encoder; unsigned int dsc_mask = 0; @@ -1239,10 +1229,16 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL; } + /* + * There may be 4 PP and 2 INTF for quad pipe case, so INTF is not + * mapped to PP 1:1. 
Let's calculate the stride with pipe/INTF + */ + num_pp_per_intf = num_pp / dpu_enc->num_phys_encs; + for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - phys->hw_pp = dpu_enc->hw_pp[i]; + phys->hw_pp = dpu_enc->hw_pp[num_pp_per_intf * i]; if (!phys->hw_pp) { DPU_ERROR_ENC(dpu_enc, "no pp block assigned at idx: %d\n", i); @@ -2171,15 +2167,12 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc) static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) { - struct dpu_hw_mixer_cfg mixer; int i, num_lm; struct dpu_global_state *global_state; - struct dpu_hw_blk *hw_lm[2]; - struct dpu_hw_mixer *hw_mixer[2]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_mixer *hw_mixer[MAX_CHANNELS_PER_ENC]; struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; - memset(&mixer, 0, sizeof(mixer)); - /* reset all mixers for this encoder */ if (ctl->ops.clear_all_blendstages) ctl->ops.clear_all_blendstages(ctl); @@ -2383,7 +2376,7 @@ void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc, */ cwb_cfg.input = INPUT_MODE_LM_OUT; - for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) { + for (int i = 0; i < MAX_CWB_PER_ENC; i++) { hw_cwb = dpu_enc->hw_cwb[i]; if (!hw_cwb) continue; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index 61b22d949454..09395d7910ac 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -302,7 +302,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( /* Use merge_3d unless DSC MERGE topology is used */ if (phys_enc->split_role == ENC_ROLE_SOLO && - dpu_cstate->num_mixers == CRTC_DUAL_MIXERS && + (dpu_cstate->num_mixers != 1) && !dpu_encoder_use_dsc_merge(phys_enc->parent)) return BLEND_3D_H_ROW_INT; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 9f8d1bba9139..23bb39b471b7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -726,3 +726,4 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { #include "catalog/dpu_10_0_sm8650.h" #include "catalog/dpu_12_0_sm8750.h" +#include "catalog/dpu_12_2_glymur.h" diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index f0768f54e9b3..336757103b5a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -24,7 +24,7 @@ #define DPU_MAX_IMG_WIDTH 0x3fff #define DPU_MAX_IMG_HEIGHT 0x3fff -#define CRTC_DUAL_MIXERS 2 +#define CRTC_QUAD_MIXERS 4 #define MAX_XIN_COUNT 16 @@ -749,6 +749,7 @@ struct dpu_mdss_cfg { const struct dpu_format_extended *vig_formats; }; +extern const struct dpu_mdss_cfg dpu_glymur_cfg; extern const struct dpu_mdss_cfg dpu_msm8917_cfg; extern const struct dpu_mdss_cfg dpu_msm8937_cfg; extern const struct dpu_mdss_cfg dpu_msm8953_cfg; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h index b7013c9822d2..cc7cc6f6f7cd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h @@ -71,12 +71,6 @@ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr); -/** - * dpu_hw_dsc_destroy - destroys dsc driver context - * @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init - */ -void 
dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc); - static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw) { return container_of(hw, struct dpu_hw_dsc, base); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index 175639c8bfbb..31451241f083 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -34,7 +34,9 @@ #define DPU_MAX_PLANES 4 #endif +#define STAGES_PER_PLANE 2 #define PIPES_PER_STAGE 2 +#define PIPES_PER_PLANE (PIPES_PER_STAGE * STAGES_PER_PLANE) #ifndef DPU_MAX_DE_CURVES #define DPU_MAX_DE_CURVES 3 #endif @@ -149,6 +151,10 @@ enum dpu_dspp { DSPP_1, DSPP_2, DSPP_3, + DSPP_4, + DSPP_5, + DSPP_6, + DSPP_7, DSPP_MAX }; @@ -159,6 +165,8 @@ enum dpu_ctl { CTL_3, CTL_4, CTL_5, + CTL_6, + CTL_7, CTL_MAX }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 4e5a8ecd31f7..f4c9767c418d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -1505,6 +1505,7 @@ static const struct dev_pm_ops dpu_pm_ops = { }; static const struct of_device_id dpu_dt_match[] = { + { .compatible = "qcom,glymur-dpu", .data = &dpu_glymur_cfg, }, { .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, }, { .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, }, { .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, }, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 905524ceeb1f..d07a6ab6e7ee 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -622,6 +622,7 @@ static void _dpu_plane_color_fill(struct dpu_plane *pdpu, struct msm_drm_private *priv = plane->dev->dev_private; struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state); u32 fill_color = (color & 0xFFFFFF) | ((alpha & 0xFF) << 24); + int i; DPU_DEBUG_PLANE(pdpu, "\n"); @@ -635,12 +636,13 @@ static void _dpu_plane_color_fill(struct dpu_plane *pdpu, return; /* update sspp */ - _dpu_plane_color_fill_pipe(pstate, &pstate->pipe, &pstate->pipe_cfg.dst_rect, - fill_color, fmt); - - if (pstate->r_pipe.sspp) - _dpu_plane_color_fill_pipe(pstate, &pstate->r_pipe, &pstate->r_pipe_cfg.dst_rect, + for (i = 0; i < PIPES_PER_PLANE; i++) { + if (!pstate->pipe[i].sspp) + continue; + _dpu_plane_color_fill_pipe(pstate, &pstate->pipe[i], + &pstate->pipe_cfg[i].dst_rect, fill_color, fmt); + } } static int dpu_plane_prepare_fb(struct drm_plane *plane, @@ -822,10 +824,14 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate; struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); - struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; - struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; + struct dpu_sw_pipe_cfg *pipe_cfg; + struct dpu_sw_pipe_cfg *r_pipe_cfg; + struct dpu_sw_pipe_cfg init_pipe_cfg; struct drm_rect fb_rect = { 0 }; + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; uint32_t max_linewidth; + u32 num_lm; + int stage_id, num_stages; min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO); max_scale = MAX_DOWNSCALE_RATIO << 16; @@ -848,10 +854,10 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, return -EINVAL; } - /* state->src is 16.16, src_rect is not */ - drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src); + num_lm = 
dpu_crtc_get_num_lm(crtc_state); - pipe_cfg->dst_rect = new_plane_state->dst; + /* state->src is 16.16, src_rect is not */ + drm_rect_fp_to_int(&init_pipe_cfg.src_rect, &new_plane_state->src); fb_rect.x2 = new_plane_state->fb->width; fb_rect.y2 = new_plane_state->fb->height; @@ -876,35 +882,94 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, max_linewidth = pdpu->catalog->caps->max_linewidth; - drm_rect_rotate(&pipe_cfg->src_rect, + drm_rect_rotate(&init_pipe_cfg.src_rect, new_plane_state->fb->width, new_plane_state->fb->height, new_plane_state->rotation); - if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || - _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { - if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { - DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", - DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); - return -E2BIG; + /* + * We have 1 mixer pair cfg for 1:1:1 and 2:2:1 topology, 2 mixer pair + * configs for left and right half screen in case of 4:4:2 topology. + * But we may have 2 rect to split wide plane that exceeds limit with 1 + * config for 2:2:1. So need to handle both wide plane splitting, and + * two halves of screen splitting for quad-pipe case. Check dest + * rectangle left/right clipping first, then check wide rectangle + * splitting in every half next. + */ + num_stages = (num_lm + 1) / 2; + /* iterate mixer configs for this plane, to separate left/right with the id */ + for (stage_id = 0; stage_id < num_stages; stage_id++) { + struct drm_rect mixer_rect = { + .x1 = stage_id * mode->hdisplay / num_stages, + .y1 = 0, + .x2 = (stage_id + 1) * mode->hdisplay / num_stages, + .y2 = mode->vdisplay + }; + int cfg_idx = stage_id * PIPES_PER_STAGE; + + pipe_cfg = &pstate->pipe_cfg[cfg_idx]; + r_pipe_cfg = &pstate->pipe_cfg[cfg_idx + 1]; + + drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src); + pipe_cfg->dst_rect = new_plane_state->dst; + + DPU_DEBUG_PLANE(pdpu, "checking src " DRM_RECT_FMT + " vs clip window " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), + DRM_RECT_ARG(&mixer_rect)); + + /* + * If this plane does not fall into mixer rect, check next + * mixer rect. 
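 * Worked illustration of this loop (editorial note, hypothetical numbers):
 * with num_lm = 4, num_stages = (4 + 1) / 2 = 2, so a 7680-wide mode yields
 * clip windows [0, 3840) and [3840, 7680). A plane spanning the full width
 * ends up with one clipped cfg per stage (dst_rect shifted into stage-local
 * coordinates), and if a clipped rect is still wider than max_linewidth
 * (say 2560), it is split again between pipe_cfg[cfg_idx] and
 * pipe_cfg[cfg_idx + 1] below.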
+ */ + if (!drm_rect_clip_scaled(&pipe_cfg->src_rect, + &pipe_cfg->dst_rect, + &mixer_rect)) { + memset(pipe_cfg, 0, 2 * sizeof(struct dpu_sw_pipe_cfg)); + + continue; } - *r_pipe_cfg = *pipe_cfg; - pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; - pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; - r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; - r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; - } else { - memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg)); - } + pipe_cfg->dst_rect.x1 -= mixer_rect.x1; + pipe_cfg->dst_rect.x2 -= mixer_rect.x1; + + DPU_DEBUG_PLANE(pdpu, "Got clip src:" DRM_RECT_FMT " dst: " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), DRM_RECT_ARG(&pipe_cfg->dst_rect)); + + /* Split wide rect into 2 rect */ + if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || + _dpu_plane_calc_clk(mode, pipe_cfg) > max_mdp_clk_rate) { + + if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); + return -E2BIG; + } + + memcpy(r_pipe_cfg, pipe_cfg, sizeof(struct dpu_sw_pipe_cfg)); + pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; + pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; + r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; + r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; + DPU_DEBUG_PLANE(pdpu, "Split wide plane into:" + DRM_RECT_FMT " and " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), + DRM_RECT_ARG(&r_pipe_cfg->src_rect)); + } else { + memset(r_pipe_cfg, 0, sizeof(struct dpu_sw_pipe_cfg)); + } - drm_rect_rotate_inv(&pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); - if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) - drm_rect_rotate_inv(&r_pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, + drm_rect_rotate_inv(&pipe_cfg->src_rect, + new_plane_state->fb->width, + new_plane_state->fb->height, new_plane_state->rotation); + if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) + drm_rect_rotate_inv(&r_pipe_cfg->src_rect, + new_plane_state->fb->width, + new_plane_state->fb->height, + new_plane_state->rotation); + } + pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); return 0; @@ -954,6 +1019,23 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp, dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth); } +static bool dpu_plane_get_single_pipe_in_stage(struct dpu_plane_state *pstate, + struct dpu_sw_pipe **single_pipe, + struct dpu_sw_pipe_cfg **single_pipe_cfg, + int stage_index) +{ + int pipe_idx; + + pipe_idx = stage_index * PIPES_PER_STAGE; + if (drm_rect_width(&pstate->pipe_cfg[pipe_idx].src_rect) != 0 && + drm_rect_width(&pstate->pipe_cfg[pipe_idx + 1].src_rect) == 0) { + *single_pipe = &pstate->pipe[pipe_idx]; + *single_pipe_cfg = &pstate->pipe_cfg[pipe_idx]; + return true; + } + + return false; +} static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, struct drm_atomic_state *state, @@ -963,20 +1045,17 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, drm_atomic_get_new_plane_state(state, plane); struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); - struct dpu_sw_pipe *pipe = &pstate->pipe; - struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; - 
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; - int ret = 0; - - ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, - &crtc_state->adjusted_mode, - new_plane_state); - if (ret) - return ret; + struct dpu_sw_pipe *pipe; + struct dpu_sw_pipe_cfg *pipe_cfg; + int ret = 0, i; - if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) { - ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, + for (i = 0; i < PIPES_PER_PLANE; i++) { + pipe = &pstate->pipe[i]; + pipe_cfg = &pstate->pipe_cfg[i]; + if (!drm_rect_width(&pipe_cfg->src_rect)) + continue; + DPU_DEBUG_PLANE(pdpu, "pipe %d is in use, validate it\n", i); + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, &crtc_state->adjusted_mode, new_plane_state); if (ret) @@ -1019,17 +1098,20 @@ static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dp static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate, struct dpu_plane_state *prev_adjacent_pstate, const struct msm_format *fmt, - uint32_t max_linewidth) + uint32_t max_linewidth, int stage_index) { - struct dpu_sw_pipe *pipe = &pstate->pipe; - struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; - struct dpu_sw_pipe *prev_pipe = &prev_adjacent_pstate->pipe; - struct dpu_sw_pipe_cfg *prev_pipe_cfg = &prev_adjacent_pstate->pipe_cfg; + struct dpu_sw_pipe *pipe, *prev_pipe; + struct dpu_sw_pipe_cfg *pipe_cfg, *prev_pipe_cfg; const struct msm_format *prev_fmt = msm_framebuffer_format(prev_adjacent_pstate->base.fb); u16 max_tile_height = 1; - if (prev_adjacent_pstate->r_pipe.sspp != NULL || + if (!dpu_plane_get_single_pipe_in_stage(pstate, &pipe, + &pipe_cfg, stage_index)) + return false; + + if (!dpu_plane_get_single_pipe_in_stage(prev_adjacent_pstate, + &prev_pipe, &prev_pipe_cfg, + stage_index) || prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE) return false; @@ -1044,11 +1126,6 @@ static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate, if (MSM_FORMAT_IS_UBWC(prev_fmt)) max_tile_height = max(max_tile_height, prev_fmt->tile_height); - r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; - r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - - r_pipe->sspp = NULL; - if (dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth) && dpu_plane_is_parallel_capable(prev_pipe_cfg, prev_fmt, max_linewidth) && (pipe_cfg->dst_rect.x1 >= prev_pipe_cfg->dst_rect.x2 || @@ -1089,10 +1166,10 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); - struct dpu_sw_pipe *pipe = &pstate->pipe; - struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; - struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; + struct dpu_sw_pipe *pipe = &pstate->pipe[0]; + struct dpu_sw_pipe *r_pipe = &pstate->pipe[1]; + struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg[0]; + struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->pipe_cfg[1]; const struct drm_crtc_state *crtc_state = NULL; uint32_t max_linewidth = dpu_kms->catalog->caps->max_linewidth; @@ -1136,7 +1213,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane, drm_atomic_get_old_plane_state(state, plane); struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state); struct drm_crtc_state *crtc_state = NULL; - int ret; + int ret, i; if (IS_ERR(plane_state)) return PTR_ERR(plane_state); @@ -1154,8 +1231,8 @@ static int 
dpu_plane_virtual_atomic_check(struct drm_plane *plane, * resources are freed by dpu_crtc_assign_plane_resources(), * but clean them here. */ - pstate->pipe.sspp = NULL; - pstate->r_pipe.sspp = NULL; + for (i = 0; i < PIPES_PER_PLANE; i++) + pstate->pipe[i].sspp = NULL; return 0; } @@ -1177,37 +1254,72 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane, return 0; } +static int dpu_plane_assign_resource_in_stage(struct dpu_sw_pipe *pipe, + struct dpu_sw_pipe_cfg *pipe_cfg, + struct drm_plane_state *plane_state, + struct dpu_global_state *global_state, + struct drm_crtc *crtc, + struct dpu_rm_sspp_requirements *reqs) +{ + struct drm_plane *plane = plane_state->plane; + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + struct dpu_sw_pipe *r_pipe = pipe + 1; + struct dpu_sw_pipe_cfg *r_pipe_cfg = pipe_cfg + 1; + + if (drm_rect_width(&pipe_cfg->src_rect) == 0) + return 0; + + pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, reqs); + if (!pipe->sspp) + return -ENODEV; + pipe->multirect_index = DPU_SSPP_RECT_SOLO; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + + if (drm_rect_width(&r_pipe_cfg->src_rect) == 0) + return 0; + + if (dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg, + pipe->sspp, + msm_framebuffer_format(plane_state->fb), + dpu_kms->catalog->caps->max_linewidth)) + return 0; + + r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, reqs); + if (!r_pipe->sspp) + return -ENODEV; + r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; + r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + + return 0; +} + static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, struct dpu_global_state *global_state, struct drm_atomic_state *state, struct drm_plane_state *plane_state, - struct drm_plane_state *prev_adjacent_plane_state) + struct drm_plane_state **prev_adjacent_plane_state) { const struct drm_crtc_state *crtc_state = NULL; struct drm_plane *plane = plane_state->plane; struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); struct dpu_rm_sspp_requirements reqs; - struct dpu_plane_state *pstate, *prev_adjacent_pstate; + struct dpu_plane_state *pstate, *prev_adjacent_pstate[STAGES_PER_PLANE]; struct dpu_sw_pipe *pipe; - struct dpu_sw_pipe *r_pipe; struct dpu_sw_pipe_cfg *pipe_cfg; - struct dpu_sw_pipe_cfg *r_pipe_cfg; const struct msm_format *fmt; + int i, ret; if (plane_state->crtc) crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); pstate = to_dpu_plane_state(plane_state); - prev_adjacent_pstate = prev_adjacent_plane_state ? - to_dpu_plane_state(prev_adjacent_plane_state) : NULL; - pipe = &pstate->pipe; - r_pipe = &pstate->r_pipe; - pipe_cfg = &pstate->pipe_cfg; - r_pipe_cfg = &pstate->r_pipe_cfg; - - pipe->sspp = NULL; - r_pipe->sspp = NULL; + for (i = 0; i < STAGES_PER_PLANE; i++) + prev_adjacent_pstate[i] = prev_adjacent_plane_state[i] ? 
+ to_dpu_plane_state(prev_adjacent_plane_state[i]) : NULL; + + for (i = 0; i < PIPES_PER_PLANE; i++) + pstate->pipe[i].sspp = NULL; if (!plane_state->fb) return -EINVAL; @@ -1219,42 +1331,24 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation); - if (drm_rect_width(&r_pipe_cfg->src_rect) == 0) { - if (!prev_adjacent_pstate || - !dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate, fmt, - dpu_kms->catalog->caps->max_linewidth)) { - pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs); - if (!pipe->sspp) - return -ENODEV; - - r_pipe->sspp = NULL; + for (i = 0; i < STAGES_PER_PLANE; i++) { + if (prev_adjacent_pstate[i] && + dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate[i], fmt, + dpu_kms->catalog->caps->max_linewidth, + i)) + continue; - pipe->multirect_index = DPU_SSPP_RECT_SOLO; - pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + if (dpu_plane_get_single_pipe_in_stage(pstate, &pipe, &pipe_cfg, i)) + prev_adjacent_plane_state[i] = plane_state; - r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; - r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - } - } else { - pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs); - if (!pipe->sspp) - return -ENODEV; - - if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg, - pipe->sspp, - msm_framebuffer_format(plane_state->fb), - dpu_kms->catalog->caps->max_linewidth)) { - /* multirect is not possible, use two SSPP blocks */ - r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs); - if (!r_pipe->sspp) - return -ENODEV; - - pipe->multirect_index = DPU_SSPP_RECT_SOLO; - pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - - r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; - r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - } + pipe = &pstate->pipe[i * PIPES_PER_STAGE]; + pipe_cfg = &pstate->pipe_cfg[i * PIPES_PER_STAGE]; + ret = dpu_plane_assign_resource_in_stage(pipe, pipe_cfg, + plane_state, + global_state, + crtc, &reqs); + if (ret) + return ret; } return dpu_plane_atomic_check_sspp(plane, state, crtc_state); @@ -1267,7 +1361,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state, unsigned int num_planes) { unsigned int i; - struct drm_plane_state *prev_adjacent_plane_state = NULL; + struct drm_plane_state *prev_adjacent_plane_state[STAGES_PER_PLANE] = { NULL }; for (i = 0; i < num_planes; i++) { struct drm_plane_state *plane_state = states[i]; @@ -1281,8 +1375,6 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state, prev_adjacent_plane_state); if (ret) return ret; - - prev_adjacent_plane_state = plane_state; } return 0; @@ -1318,6 +1410,7 @@ void dpu_plane_flush(struct drm_plane *plane) { struct dpu_plane *pdpu; struct dpu_plane_state *pstate; + int i; if (!plane || !plane->state) { DPU_ERROR("invalid plane\n"); @@ -1338,8 +1431,8 @@ void dpu_plane_flush(struct drm_plane *plane) /* force 100% alpha */ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF); else { - dpu_plane_flush_csc(pdpu, &pstate->pipe); - dpu_plane_flush_csc(pdpu, &pstate->r_pipe); + for (i = 0; i < PIPES_PER_PLANE; i++) + dpu_plane_flush_csc(pdpu, &pstate->pipe[i]); } /* flag h/w flush complete */ @@ -1440,15 +1533,12 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane, struct dpu_plane *pdpu = to_dpu_plane(plane); struct drm_plane_state *state = plane->state; struct dpu_plane_state *pstate = to_dpu_plane_state(state); - struct dpu_sw_pipe *pipe = 
&pstate->pipe; - struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; struct drm_crtc *crtc = state->crtc; struct drm_framebuffer *fb = state->fb; bool is_rt_pipe; const struct msm_format *fmt = msm_framebuffer_format(fb); - struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; - struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; + int i; pstate->pending = true; @@ -1463,12 +1553,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane, crtc->base.id, DRM_RECT_ARG(&state->dst), &fmt->pixel_format, MSM_FORMAT_IS_UBWC(fmt)); - dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt, - drm_mode_vrefresh(&crtc->mode), - &pstate->layout); - - if (r_pipe->sspp) { - dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt, + for (i = 0; i < PIPES_PER_PLANE; i++) { + if (!drm_rect_width(&pstate->pipe_cfg[i].src_rect)) + continue; + dpu_plane_sspp_update_pipe(plane, &pstate->pipe[i], + &pstate->pipe_cfg[i], fmt, drm_mode_vrefresh(&crtc->mode), &pstate->layout); } @@ -1476,15 +1565,17 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane, if (pstate->needs_qos_remap) pstate->needs_qos_remap = false; - pstate->plane_fetch_bw = _dpu_plane_calc_bw(pdpu->catalog, fmt, - &crtc->mode, pipe_cfg); - - pstate->plane_clk = _dpu_plane_calc_clk(&crtc->mode, pipe_cfg); - - if (r_pipe->sspp) { - pstate->plane_fetch_bw += _dpu_plane_calc_bw(pdpu->catalog, fmt, &crtc->mode, r_pipe_cfg); + pstate->plane_fetch_bw = 0; + pstate->plane_clk = 0; + for (i = 0; i < PIPES_PER_PLANE; i++) { + if (!drm_rect_width(&pstate->pipe_cfg[i].src_rect)) + continue; + pstate->plane_fetch_bw += _dpu_plane_calc_bw(pdpu->catalog, fmt, + &crtc->mode, &pstate->pipe_cfg[i]); - pstate->plane_clk = max(pstate->plane_clk, _dpu_plane_calc_clk(&crtc->mode, r_pipe_cfg)); + pstate->plane_clk = max(pstate->plane_clk, + _dpu_plane_calc_clk(&crtc->mode, + &pstate->pipe_cfg[i])); } } @@ -1492,17 +1583,28 @@ static void _dpu_plane_atomic_disable(struct drm_plane *plane) { struct drm_plane_state *state = plane->state; struct dpu_plane_state *pstate = to_dpu_plane_state(state); - struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; + struct dpu_sw_pipe *pipe; + int i; + + for (i = 0; i < PIPES_PER_PLANE; i += 1) { + pipe = &pstate->pipe[i]; + if (!pipe->sspp) + continue; - trace_dpu_plane_disable(DRMID(plane), false, - pstate->pipe.multirect_mode); + trace_dpu_plane_disable(DRMID(plane), false, + pstate->pipe[i].multirect_mode); - if (r_pipe->sspp) { - r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; - r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + if (i % PIPES_PER_STAGE == 0) + continue; - if (r_pipe->sspp->ops.setup_multirect) - r_pipe->sspp->ops.setup_multirect(r_pipe); + /* + * clear multirect for the right pipe so that the SSPP + * can be further reused in the solo mode + */ + pipe->multirect_index = DPU_SSPP_RECT_SOLO; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + if (pipe->sspp->ops.setup_multirect) + pipe->sspp->ops.setup_multirect(pipe); } pstate->pending = true; @@ -1597,31 +1699,26 @@ static void dpu_plane_atomic_print_state(struct drm_printer *p, const struct drm_plane_state *state) { const struct dpu_plane_state *pstate = to_dpu_plane_state(state); - const struct dpu_sw_pipe *pipe = &pstate->pipe; - const struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; - const struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - const struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; + const struct dpu_sw_pipe *pipe; + const struct dpu_sw_pipe_cfg *pipe_cfg; + int i; drm_printf(p, "\tstage=%d\n", pstate->stage); - if 
(pipe->sspp) { - drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name); - drm_printf(p, "\tmultirect_mode[0]=%s\n", + for (i = 0; i < PIPES_PER_PLANE; i++) { + pipe = &pstate->pipe[i]; + if (!pipe->sspp) + continue; + pipe_cfg = &pstate->pipe_cfg[i]; + drm_printf(p, "\tsspp[%d]=%s\n", i, pipe->sspp->cap->name); + drm_printf(p, "\tmultirect_mode[%d]=%s\n", i, dpu_get_multirect_mode(pipe->multirect_mode)); - drm_printf(p, "\tmultirect_index[0]=%s\n", + drm_printf(p, "\tmultirect_index[%d]=%s\n", i, dpu_get_multirect_index(pipe->multirect_index)); - drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect)); - drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect)); - } - - if (r_pipe->sspp) { - drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name); - drm_printf(p, "\tmultirect_mode[1]=%s\n", - dpu_get_multirect_mode(r_pipe->multirect_mode)); - drm_printf(p, "\tmultirect_index[1]=%s\n", - dpu_get_multirect_index(r_pipe->multirect_index)); - drm_printf(p, "\tsrc[1]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&r_pipe_cfg->src_rect)); - drm_printf(p, "\tdst[1]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&r_pipe_cfg->dst_rect)); + drm_printf(p, "\tsrc[%d]=" DRM_RECT_FMT "\n", i, + DRM_RECT_ARG(&pipe_cfg->src_rect)); + drm_printf(p, "\tdst[%d]=" DRM_RECT_FMT "\n", i, + DRM_RECT_ARG(&pipe_cfg->dst_rect)); } } @@ -1659,14 +1756,17 @@ void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state); struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + int i; if (!pdpu->is_rt_pipe) return; pm_runtime_get_sync(&dpu_kms->pdev->dev); - _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, enable); - if (pstate->r_pipe.sspp) - _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, enable); + for (i = 0; i < PIPES_PER_PLANE; i++) { + if (!pstate->pipe[i].sspp) + continue; + _dpu_plane_set_qos_ctrl(plane, &pstate->pipe[i], enable); + } pm_runtime_put_sync(&dpu_kms->pdev->dev); } #endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index a3a6e9028333..1ef5a041b8ac 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -17,10 +17,8 @@ /** * struct dpu_plane_state: Define dpu extension of drm plane state object * @base: base drm plane state object - * @pipe: software pipe description - * @r_pipe: software pipe description of the second pipe - * @pipe_cfg: software pipe configuration - * @r_pipe_cfg: software pipe configuration for the second pipe + * @pipe: software pipe description array + * @pipe_cfg: software pipe configuration array * @stage: assigned by crtc blender * @needs_qos_remap: qos remap settings need to be updated * @multirect_index: index of the rectangle of SSPP @@ -33,10 +31,8 @@ */ struct dpu_plane_state { struct drm_plane_state base; - struct dpu_sw_pipe pipe; - struct dpu_sw_pipe r_pipe; - struct dpu_sw_pipe_cfg pipe_cfg; - struct dpu_sw_pipe_cfg r_pipe_cfg; + struct dpu_sw_pipe pipe[PIPES_PER_PLANE]; + struct dpu_sw_pipe_cfg pipe_cfg[PIPES_PER_PLANE]; enum dpu_stage stage; bool needs_qos_remap; bool pending; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index d9c3b0a1d091..f6568ed8375f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -374,7 +374,11 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, if (!rm->mixer_blks[i]) continue; - lm_count = 0; + /* + * 
Reset lm_count to an even index. This will drop the previous + * primary mixer if failed to find its peer. + */ + lm_count &= ~1; lm_idx[lm_count] = i; if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h index 5307cbc2007c..cb24ad2a6d8d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h @@ -651,9 +651,9 @@ TRACE_EVENT(dpu_crtc_setup_mixer, TP_PROTO(uint32_t crtc_id, uint32_t plane_id, struct drm_plane_state *state, struct dpu_plane_state *pstate, uint32_t stage_idx, uint32_t pixel_format, - uint64_t modifier), + struct dpu_sw_pipe *pipe, uint64_t modifier), TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, - pixel_format, modifier), + pixel_format, pipe, modifier), TP_STRUCT__entry( __field( uint32_t, crtc_id ) __field( uint32_t, plane_id ) @@ -676,9 +676,9 @@ TRACE_EVENT(dpu_crtc_setup_mixer, __entry->dst_rect = drm_plane_state_dest(state); __entry->stage_idx = stage_idx; __entry->stage = pstate->stage; - __entry->sspp = pstate->pipe.sspp->idx; - __entry->multirect_idx = pstate->pipe.multirect_index; - __entry->multirect_mode = pstate->pipe.multirect_mode; + __entry->sspp = pipe->sspp->idx; + __entry->multirect_idx = pipe->multirect_index; + __entry->multirect_mode = pipe->multirect_mode; __entry->pixel_format = pixel_format; __entry->modifier = modifier; ), diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index da53ca88251e..e8066f9fd534 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -527,13 +527,14 @@ static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct mdp4_kms *mdp4_kms = get_kms(crtc); + wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc); int ret; ret = drm_crtc_vblank_get(crtc); if (ret) return; - ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + ret = wait_event_timeout(*queue, !(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) & mdp4_crtc->flushed_mask), msecs_to_jiffies(50)); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 4c4900a7beda..373ae7d9bf01 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1234,6 +1234,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_ctl *ctl = mdp5_cstate->ctl; + wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc); int ret; /* Should not call this function if crtc is disabled. 
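 * Editorial note: drm_crtc_vblank_waitqueue() used above is the drm_vblank.h
 * helper which, as the removed lines in these mdp4/mdp5 hunks suggest,
 * resolves to &crtc->dev->vblank[drm_crtc_index(crtc)].queue, so switching to
 * wait_event_timeout(*queue, ...) is a mechanical cleanup with no behavioural
 * change.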
*/ @@ -1244,7 +1245,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) if (ret) return; - ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + ret = wait_event_timeout(*queue, ((mdp5_ctl_get_commit_status(ctl) & mdp5_crtc->flushed_mask) == 0), msecs_to_jiffies(50)); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 7c790406d533..4ca183fb61a9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -336,8 +336,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -373,8 +372,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane, int min_scale, max_scale; int ret; - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h index b5f452bd7ada..53bd1dcde15f 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h @@ -38,6 +38,7 @@ * struct msm_disp_state - structure to store current dpu state * @dev: device pointer * @drm_dev: drm device pointer + * @blocks: list head for hardware state blocks * @atomic_state: atomic state duplicated at the time of the error * @time: timestamp at which the coredump was captured */ @@ -55,7 +56,7 @@ struct msm_disp_state { /** * struct msm_disp_state_block - structure to store each hardware block state * @name: name of the block - * @drm_dev: handle to the linked list head + * @node: handle to the linked list head * @size: size of the register space of this hardware block * @state: array holding the register dump of this hardware block * @base_addr: starting address of this hardware block's register space @@ -88,8 +89,9 @@ void msm_disp_snapshot_destroy(struct drm_device *drm_dev); * msm_disp_snapshot_state_sync - synchronously snapshot display state * @kms: the kms object * - * Returns state or error + * Returns: state or error * + * Context: * Must be called with &kms->dump_mutex held */ struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms); @@ -97,7 +99,7 @@ struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms); /** * msm_disp_snapshot_state - trigger to dump the display snapshot * @drm_dev: handle to drm device - + * * Returns: none */ void msm_disp_snapshot_state(struct drm_device *drm_dev); @@ -114,7 +116,7 @@ void msm_disp_state_print(struct msm_disp_state *disp_state, struct drm_printer /** * msm_disp_snapshot_capture_state - utility to capture atomic state and hw registers * @disp_state: handle to msm_disp_state struct - + * * Returns: none */ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state); @@ -122,7 +124,7 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state); /** * msm_disp_state_free - free the memory after the coredump has been read * @data: handle to struct msm_disp_state - + * * Returns: none */ void msm_disp_state_free(void *data); @@ -130,7 +132,6 @@ void msm_disp_state_free(void *data); /** * msm_disp_snapshot_add_block - add a hardware block with its register dump * @disp_state: handle to struct msm_disp_state - 
* @name: name of the hardware block * @len: size of the register space of the hardware block * @base_addr: starting address of the register space of the hardware block * @fmt: format in which the block names need to be printed diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index 071bcdea80f7..19b470968f4d 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -82,8 +82,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p) drm_printf(p, "kernel: " UTS_RELEASE "\n"); drm_printf(p, "module: " KBUILD_MODNAME "\n"); drm_printf(p, "dpu devcoredump\n"); - drm_printf(p, "time: %lld.%09ld\n", - state->time.tv_sec, state->time.tv_nsec); + drm_printf(p, "time: %ptSp\n", &state->time); list_for_each_entry_safe(block, tmp, &state->blocks, node) { drm_printf(p, "====================%s================\n", block->name); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index c42fd2c17a32..cbcc7c2f0ffc 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -423,13 +423,13 @@ static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl) static void msm_dp_ctrl_lane_mapping(struct msm_dp_ctrl_private *ctrl) { - u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */ + u32 *lane_map = ctrl->link->lane_map; u32 ln_mapping; - ln_mapping = ln_0 << LANE0_MAPPING_SHIFT; - ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT; - ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT; - ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT; + ln_mapping = lane_map[0] << LANE0_MAPPING_SHIFT; + ln_mapping |= lane_map[1] << LANE1_MAPPING_SHIFT; + ln_mapping |= lane_map[2] << LANE2_MAPPING_SHIFT; + ln_mapping |= lane_map[3] << LANE3_MAPPING_SHIFT; msm_dp_write_link(ctrl, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, ln_mapping); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index d87d47cc7ec3..9bd9cd5c1e03 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -130,6 +130,14 @@ struct msm_dp_desc { bool wide_bus_supported; }; +static const struct msm_dp_desc msm_dp_desc_glymur[] = { + { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x0af64000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, + { .io_start = 0x0af6c000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true }, + {} +}; + static const struct msm_dp_desc msm_dp_desc_sa8775p[] = { { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, @@ -187,6 +195,7 @@ static const struct msm_dp_desc msm_dp_desc_x1e80100[] = { }; static const struct of_device_id msm_dp_dt_match[] = { + { .compatible = "qcom,glymur-dp", .data = &msm_dp_desc_glymur }, { .compatible = "qcom,sa8775p-dp", .data = &msm_dp_desc_sa8775p }, { .compatible = "qcom,sc7180-dp", .data = &msm_dp_desc_sc7180 }, { .compatible = "qcom,sc7280-dp", .data = &msm_dp_desc_sc7280 }, diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 66e1bbd80db3..34a91e194a12 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -6,12 +6,14 @@ #define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ #include <drm/drm_device.h> +#include 
<drm/drm_of.h> #include <drm/drm_print.h> #include "dp_reg.h" #include "dp_link.h" #include "dp_panel.h" +#define DP_LINK_RATE_HBR2 540000 /* kbytes */ #define DP_TEST_REQUEST_MASK 0x7F enum audio_sample_rate { @@ -1210,10 +1212,121 @@ u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp) return tbd; } +static u32 msm_dp_link_link_frequencies(struct device_node *of_node) +{ + struct device_node *endpoint; + u64 frequency = 0; + int cnt; + + endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */ + if (!endpoint) + return 0; + + cnt = of_property_count_u64_elems(endpoint, "link-frequencies"); + + if (cnt > 0) + of_property_read_u64_index(endpoint, "link-frequencies", + cnt - 1, &frequency); + of_node_put(endpoint); + + do_div(frequency, + 10 * /* from symbol rate to link rate */ + 1000); /* kbytes */ + + return frequency; +} + +/* + * Always populate msm_dp_link->lane_map with 4 lanes. + * - Use DTS "data-lanes" if present; otherwise fall back to default mapping. + * - For partial definitions, fill remaining entries with unused lanes in + * ascending order. + */ +static int msm_dp_link_lane_map(struct device *dev, struct msm_dp_link *msm_dp_link) +{ + struct device_node *of_node = dev->of_node; + struct device_node *endpoint; + int cnt = msm_dp_link->max_dp_lanes; + u32 tmp[DP_MAX_NUM_DP_LANES]; + u32 map[DP_MAX_NUM_DP_LANES] = {0, 1, 2, 3}; /* default 1:1 mapping */ + bool used[DP_MAX_NUM_DP_LANES] = {false}; + int i, j = 0, ret = -EINVAL; + + endpoint = of_graph_get_endpoint_by_regs(of_node, 1, -1); + if (endpoint) { + ret = of_property_read_u32_array(endpoint, "data-lanes", tmp, cnt); + if (ret) + dev_dbg(dev, "endpoint data-lanes read failed (ret=%d)\n", ret); + } + + if (ret) { + ret = of_property_read_u32_array(of_node, "data-lanes", tmp, cnt); + if (ret) { + dev_info(dev, "data-lanes not defined, set to default\n"); + goto out; + } + } + + for (i = 0; i < cnt; i++) { + if (tmp[i] >= DP_MAX_NUM_DP_LANES) { + dev_err(dev, "data-lanes[%d]=%u out of range\n", i, tmp[i]); + return -EINVAL; + } + used[tmp[i]] = true; + map[i] = tmp[i]; + } + + /* Fill the remaining entries with unused physical lanes (ascending) */ + for (i = cnt; i < DP_MAX_NUM_DP_LANES && j < DP_MAX_NUM_DP_LANES; j++) { + if (!used[j]) + map[i++] = j; + } + +out: + if (endpoint) + of_node_put(endpoint); + + dev_dbg(dev, "data-lanes count %d <%d %d %d %d>\n", cnt, map[0], map[1], map[2], map[3]); + memcpy(msm_dp_link->lane_map, map, sizeof(map)); + return 0; +} + +static int msm_dp_link_parse_dt(struct device *dev, struct msm_dp_link *msm_dp_link) +{ + struct device_node *of_node = dev->of_node; + int cnt; + + /* + * data-lanes is the property of msm_dp_out endpoint + */ + cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES); + if (cnt < 0) { + /* legacy code, data-lanes is the property of mdss_dp node */ + cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES); + } + + if (cnt > 0) + msm_dp_link->max_dp_lanes = cnt; + else + msm_dp_link->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */ + + if (msm_dp_link_lane_map(dev, msm_dp_link)) { + dev_err(dev, "failed to parse data-lanes\n"); + return -EINVAL; + } + + msm_dp_link->max_dp_link_rate = msm_dp_link_link_frequencies(of_node); + if (!msm_dp_link->max_dp_link_rate) + msm_dp_link->max_dp_link_rate = DP_LINK_RATE_HBR2; + + return 0; +} + struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux) { struct msm_dp_link_private *link; struct msm_dp_link *msm_dp_link; + int ret; if 
(!dev || !aux) { DRM_ERROR("invalid input\n"); @@ -1229,5 +1342,9 @@ struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux) mutex_init(&link->psm_mutex); msm_dp_link = &link->msm_dp_link; + ret = msm_dp_link_parse_dt(dev, msm_dp_link); + if (ret) + return ERR_PTR(ret); + return msm_dp_link; } diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h index ba47c6d19fbf..b1eb2de6d2a7 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.h +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -12,6 +12,7 @@ #define DS_PORT_STATUS_CHANGED 0x200 #define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF #define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) +#define DP_MAX_NUM_DP_LANES 4 struct msm_dp_link_info { unsigned char revision; @@ -72,6 +73,10 @@ struct msm_dp_link { struct msm_dp_link_test_audio test_audio; struct msm_dp_link_phy_params phy_params; struct msm_dp_link_info link_params; + + u32 lane_map[DP_MAX_NUM_DP_LANES]; + u32 max_dp_lanes; + u32 max_dp_link_rate; }; /** diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 15b7f6c7146e..ad5d55bf009d 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -16,9 +16,6 @@ #define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4) -#define DP_MAX_NUM_DP_LANES 4 -#define DP_LINK_RATE_HBR2 540000 /* kbytes */ - struct msm_dp_panel_private { struct device *dev; struct drm_device *drm_dev; @@ -91,6 +88,7 @@ static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel) int rc, max_lttpr_lanes, max_lttpr_rate; struct msm_dp_panel_private *panel; struct msm_dp_link_info *link_info; + struct msm_dp_link *link; u8 *dpcd, major, minor; panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); @@ -105,16 +103,20 @@ static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel) major = (link_info->revision >> 4) & 0x0f; minor = link_info->revision & 0x0f; + link = panel->link; + drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n", + link->max_dp_lanes, link->max_dp_link_rate); + link_info->rate = drm_dp_max_link_rate(dpcd); link_info->num_lanes = drm_dp_max_lane_count(dpcd); /* Limit data lanes from data-lanes of endpoint property of dtsi */ - if (link_info->num_lanes > msm_dp_panel->max_dp_lanes) - link_info->num_lanes = msm_dp_panel->max_dp_lanes; + if (link_info->num_lanes > link->max_dp_lanes) + link_info->num_lanes = link->max_dp_lanes; /* Limit link rate from link-frequencies of endpoint property of dtsi */ - if (link_info->rate > msm_dp_panel->max_dp_link_rate) - link_info->rate = msm_dp_panel->max_dp_link_rate; + if (link_info->rate > link->max_dp_link_rate) + link_info->rate = link->max_dp_link_rate; /* Limit data lanes from LTTPR capabilities, if any */ max_lttpr_lanes = drm_dp_lttpr_max_lane_count(panel->link->lttpr_common_caps); @@ -173,9 +175,6 @@ int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel, panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); - drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n", - msm_dp_panel->max_dp_lanes, msm_dp_panel->max_dp_link_rate); - rc = msm_dp_panel_read_dpcd(msm_dp_panel); if (rc) { DRM_ERROR("read dpcd failed %d\n", rc); @@ -648,60 +647,6 @@ int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel) return 0; } -static u32 msm_dp_panel_link_frequencies(struct device_node *of_node) -{ - struct device_node *endpoint; - u64 frequency = 0; - int cnt; - - endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */ - if (!endpoint) - 
return 0; - - cnt = of_property_count_u64_elems(endpoint, "link-frequencies"); - - if (cnt > 0) - of_property_read_u64_index(endpoint, "link-frequencies", - cnt - 1, &frequency); - of_node_put(endpoint); - - do_div(frequency, - 10 * /* from symbol rate to link rate */ - 1000); /* kbytes */ - - return frequency; -} - -static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel) -{ - struct msm_dp_panel_private *panel; - struct device_node *of_node; - int cnt; - - panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); - of_node = panel->dev->of_node; - - /* - * data-lanes is the property of msm_dp_out endpoint - */ - cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES); - if (cnt < 0) { - /* legacy code, data-lanes is the property of mdss_dp node */ - cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES); - } - - if (cnt > 0) - msm_dp_panel->max_dp_lanes = cnt; - else - msm_dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */ - - msm_dp_panel->max_dp_link_rate = msm_dp_panel_link_frequencies(of_node); - if (!msm_dp_panel->max_dp_link_rate) - msm_dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2; - - return 0; -} - struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux, struct msm_dp_link *link, void __iomem *link_base, @@ -709,7 +654,6 @@ struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux { struct msm_dp_panel_private *panel; struct msm_dp_panel *msm_dp_panel; - int ret; if (!dev || !aux || !link) { DRM_ERROR("invalid input\n"); @@ -729,10 +673,6 @@ struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux msm_dp_panel = &panel->msm_dp_panel; msm_dp_panel->max_bw_code = DP_LINK_BW_8_1; - ret = msm_dp_panel_parse_dt(msm_dp_panel); - if (ret) - return ERR_PTR(ret); - return msm_dp_panel; } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index d2cf401506dc..921a296852d4 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -41,9 +41,6 @@ struct msm_dp_panel { bool vsc_sdp_supported; u32 hw_revision; - u32 max_dp_lanes; - u32 max_dp_link_rate; - u32 max_bw_code; }; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index b5969374d53f..fd19995b12b5 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -52,8 +52,6 @@ static void msm_fbdev_fb_destroy(struct fb_info *info) drm_framebuffer_remove(fb); drm_client_release(&helper->client); - drm_fb_helper_unprepare(helper); - kfree(helper); } static const struct fb_ops msm_fb_ops = { @@ -93,9 +91,9 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, { struct drm_device *dev = helper->dev; struct msm_drm_private *priv = dev->dev_private; + struct fb_info *fbi = helper->info; struct drm_framebuffer *fb = NULL; struct drm_gem_object *bo; - struct fb_info *fbi = NULL; uint64_t paddr; uint32_t format; int ret, pitch; @@ -128,13 +126,6 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto fail; } - fbi = drm_fb_helper_alloc_info(helper); - if (IS_ERR(fbi)) { - DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n"); - ret = PTR_ERR(fbi); - goto fail; - } - DBG("fbi=%p, dev=%p", fbi, dev); helper->funcs = &msm_fbdev_helper_funcs; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 9f7fbe577abb..017411a0bf45 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -10,8 +10,10 @@ #include 
<linux/shmem_fs.h> #include <linux/dma-buf.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_prime.h> #include <drm/drm_file.h> +#include <drm/drm_fourcc.h> #include <trace/events/gpu_mem.h> @@ -698,8 +700,32 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm) int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - args->pitch = align_pitch(args->width, args->bpp); - args->size = PAGE_ALIGN(args->pitch * args->height); + u32 fourcc; + u64 pitch_align; + int ret; + + /* + * Adreno needs pitch aligned to 32 pixels. Compute the number + * of bytes for a block of 32 pixels at the given color format. + * Use the result as pitch alignment. + */ + fourcc = drm_driver_color_mode_format(dev, args->bpp); + if (fourcc != DRM_FORMAT_INVALID) { + const struct drm_format_info *info; + + info = drm_format_info(fourcc); + if (!info) + return -EINVAL; + pitch_align = drm_format_info_min_pitch(info, 0, 32); + } else { + pitch_align = round_up(args->width, 32) * DIV_ROUND_UP(args->bpp, SZ_8); + } + if (!pitch_align || pitch_align > U32_MAX) + return -EINVAL; + ret = drm_mode_size_dumb(dev, args, pitch_align, 0); + if (ret) + return ret; + return msm_gem_new_handle(dev, file, args->size, MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb"); } diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 89a95977f41e..71d5238437eb 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -462,15 +462,20 @@ struct op_arg { bool kept; }; -static void +static int vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op) { struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL); + if (!op) + return -ENOMEM; + *op = _op; list_add_tail(&op->node, &arg->job->vm_ops); if (op->obj) drm_gem_object_get(op->obj); + + return 0; } static struct drm_gpuva * @@ -489,6 +494,7 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg) struct drm_gpuva *vma; struct sg_table *sgt; unsigned prot; + int ret; if (arg->kept) return 0; @@ -500,8 +506,6 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg) vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj, vma->va.addr, vma->va.range); - vma->flags = ((struct op_arg *)arg)->flags; - if (obj) { sgt = to_msm_bo(obj)->sgt; prot = msm_gem_prot(obj); @@ -510,7 +514,7 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg) prot = IOMMU_READ | IOMMU_WRITE; } - vm_op_enqueue(arg, (struct msm_vm_op){ + ret = vm_op_enqueue(arg, (struct msm_vm_op){ .op = MSM_VM_OP_MAP, .map = { .sgt = sgt, @@ -523,6 +527,10 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg) .obj = vma->gem.obj, }); + if (ret) + return ret; + + vma->flags = ((struct op_arg *)arg)->flags; to_msm_vma(vma)->mapped = true; return 0; @@ -538,6 +546,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg) struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo; bool mapped = to_msm_vma(orig_vma)->mapped; unsigned flags; + int ret; vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma, orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range); @@ -547,7 +556,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg) drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range); - vm_op_enqueue(arg, (struct msm_vm_op){ + ret = vm_op_enqueue(arg, (struct msm_vm_op){ .op = MSM_VM_OP_UNMAP, .unmap = { .iova = unmap_start, @@ -557,6 +566,9 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg) .obj = orig_vma->gem.obj, }); + if 
(ret) + return ret; + /* * Part of this GEM obj is still mapped, but we're going to kill the * existing VMA and replace it with one or two new ones (ie. two if @@ -618,6 +630,7 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg) struct msm_vm_bind_job *job = arg->job; struct drm_gpuva *vma = op->unmap.va; struct msm_gem_vma *msm_vma = to_msm_vma(vma); + int ret; vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj, vma->va.addr, vma->va.range); @@ -650,7 +663,7 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg) if (!msm_vma->mapped) goto out_close; - vm_op_enqueue(arg, (struct msm_vm_op){ + ret = vm_op_enqueue(arg, (struct msm_vm_op){ .op = MSM_VM_OP_UNMAP, .unmap = { .iova = vma->va.addr, @@ -660,6 +673,9 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg) .obj = vma->gem.obj, }); + if (ret) + return ret; + msm_vma->mapped = false; out_close: diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 17759abc46d7..995549d0bbbc 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -197,8 +197,7 @@ static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset, drm_printf(&p, "---\n"); drm_printf(&p, "kernel: " UTS_RELEASE "\n"); drm_printf(&p, "module: " KBUILD_MODNAME "\n"); - drm_printf(&p, "time: %lld.%09ld\n", - state->time.tv_sec, state->time.tv_nsec); + drm_printf(&p, "time: %ptSp\n", &state->time); if (state->comm) drm_printf(&p, "comm: %s\n", state->comm); if (state->cmd) @@ -287,16 +286,17 @@ static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submi state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL); - drm_gpuvm_for_each_va (vma, submit->vm) { - bool dump = rd_full || (vma->flags & MSM_VMA_DUMP); + if (state->bos) + drm_gpuvm_for_each_va(vma, submit->vm) { + bool dump = rd_full || (vma->flags & MSM_VMA_DUMP); - /* Skip MAP_NULL/PRR VMAs: */ - if (!vma->gem.obj) - continue; + /* Skip MAP_NULL/PRR VMAs: */ + if (!vma->gem.obj) + continue; - msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr, - dump, vma->gem.offset, vma->va.range); - } + msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr, + dump, vma->gem.offset, vma->va.range); + } drm_exec_fini(&exec); } else { @@ -348,6 +348,10 @@ static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_v state->vm_logs = kmalloc_array( state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL); + if (!state->vm_logs) { + state->nr_vm_logs = 0; + } + for (int i = 0; i < state->nr_vm_logs; i++) { int idx = (i + first) & vm_log_mask; diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index 2d0e3e784c04..bf9a33e925ac 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -553,8 +553,10 @@ static const struct msm_mdss_data data_153k6 = { static const struct of_device_id mdss_dt_match[] = { { .compatible = "qcom,mdss", .data = &data_153k6 }, + { .compatible = "qcom,glymur-mdss", .data = &data_57k }, { .compatible = "qcom,msm8998-mdss", .data = &data_76k8 }, { .compatible = "qcom,qcm2290-mdss", .data = &data_76k8 }, + { .compatible = "qcom,qcs8300-mdss", .data = &data_74k }, { .compatible = "qcom,sa8775p-mdss", .data = &data_74k }, { .compatible = "qcom,sar2130p-mdss", .data = &data_74k }, { .compatible = "qcom,sdm670-mdss", .data = &data_76k8 }, diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml index 9459b6038217..3941e7510754 100644 --- 
a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml @@ -7,9 +7,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <import file="adreno/adreno_pm4.xml"/> <import file="adreno/a6xx_enums.xml"/> <import file="adreno/a7xx_enums.xml"/> +<import file="adreno/a8xx_enums.xml"/> <import file="adreno/a6xx_perfcntrs.xml"/> <import file="adreno/a7xx_perfcntrs.xml"/> <import file="adreno/a6xx_descriptors.xml"/> +<import file="adreno/a8xx_descriptors.xml"/> <!-- Each register that is actually being used by driver should have "usage" defined, @@ -84,29 +86,134 @@ by a particular renderpass/blit. <bitfield name="CP_ILLEGAL_INSTR_ERROR_BV" pos="17" type="boolean" variants="A7XX-"/> </bitset> + <bitset name="A8XX_CP_GLOBAL_INT_MASK" inline="no" varset="chip"> + <bitfield name="HWFAULTBR" pos="0" type="boolean"/> + <bitfield name="HWFAULTBV" pos="1" type="boolean"/> + <bitfield name="HWFAULTLPAC" pos="2" type="boolean"/> + <bitfield name="HWFAULTAQE0" pos="3" type="boolean"/> + <bitfield name="HWFAULTAQE1" pos="4" type="boolean"/> + <bitfield name="HWFAULTDDEBR" pos="5" type="boolean"/> + <bitfield name="HWFAULTDDEBV" pos="6" type="boolean"/> + <bitfield name="SWFAULTBR" pos="16" type="boolean"/> + <bitfield name="SWFAULTBV" pos="17" type="boolean"/> + <bitfield name="SWFAULTLPAC" pos="18" type="boolean"/> + <bitfield name="SWFAULTAQE0" pos="19" type="boolean"/> + <bitfield name="SWFAULTAQE1" pos="20" type="boolean"/> + <bitfield name="SWFAULTDDEBR" pos="21" type="boolean"/> + <bitfield name="SWFAULTDDEBV" pos="22" type="boolean"/> + </bitset> + + <bitset name="A8XX_CP_INTERRUPT_STATUS_MASK_PIPE" inline="no" varset="chip"> + <bitfield name="CSFRBWRAP" pos="0" type="boolean"/> + <bitfield name="CSFIB1WRAP" pos="1" type="boolean"/> + <bitfield name="CSFIB2WRAP" pos="2" type="boolean"/> + <bitfield name="CSFIB3WRAP" pos="3" type="boolean"/> + <bitfield name="CSFSDSWRAP" pos="4" type="boolean"/> + <bitfield name="CSFMRBWRAP" pos="5" type="boolean"/> + <bitfield name="CSFVSDWRAP" pos="6" type="boolean"/> + <bitfield name="OPCODEERROR" pos="8" type="boolean"/> + <bitfield name="VSDPARITYERROR" pos="9" type="boolean"/> + <bitfield name="REGISTERPROTECTIONERROR" pos="10" type="boolean"/> + <bitfield name="ILLEGALINSTRUCTION" pos="11" type="boolean"/> + <bitfield name="SMMUFAULT" pos="12" type="boolean"/> + <bitfield name="VBIFRESPCLIENT" pos="13" type="boolean"/> + <bitfield name="VBIFRESPTYPE" pos="19" type="boolean"/> + <bitfield name="VBIFRESPREAD" pos="21" type="boolean"/> + <bitfield name="VBIFRESP" pos="22" type="boolean"/> + <bitfield name="RTWROVF" pos="23" type="boolean"/> + <bitfield name="LRZRTWROVF" pos="24" type="boolean"/> + <bitfield name="LRZRTREFCNTOVF" pos="25" type="boolean"/> + <bitfield name="LRZRTCLRRESMISS" pos="26" type="boolean"/> + </bitset> + + <bitset name="A8XX_CP_HW_FAULT_STATUS_MASK_PIPE" inline="no" varset="chip"> + <bitfield name="CSFRBFAULT" pos="0" type="boolean"/> + <bitfield name="CSFIB1FAULT" pos="1" type="boolean"/> + <bitfield name="CSFIB2FAULT" pos="2" type="boolean"/> + <bitfield name="CSFIB3FAULT" pos="3" type="boolean"/> + <bitfield name="CSFSDSFAULT" pos="4" type="boolean"/> + <bitfield name="CSFMRBFAULT" pos="5" type="boolean"/> + <bitfield name="CSFVSDFAULT" pos="6" type="boolean"/> + <bitfield name="SQEREADBURSTOVF" pos="8" type="boolean"/> + <bitfield name="EVENTENGINEOVF" pos="9" type="boolean"/> + <bitfield name="UCODEERROR" pos="10" type="boolean"/> + </bitset> + <reg64 offset="0x0800" 
name="CP_RB_BASE"/> <reg32 offset="0x0802" name="CP_RB_CNTL"/> + <reg32 offset="0x0803" name="CP_RB_RPTR_WR" variants="A7XX-"/> <reg64 offset="0x0804" name="CP_RB_RPTR_ADDR"/> <reg32 offset="0x0806" name="CP_RB_RPTR"/> <reg32 offset="0x0807" name="CP_RB_WPTR"/> - <reg32 offset="0x0808" name="CP_SQE_CNTL"/> - <reg32 offset="0x0812" name="CP_CP2GMU_STATUS"> + <reg32 offset="0x0808" name="CP_RB_RPTR_ADDR_BV" variants="A8XX-"/> + <reg32 offset="0x080a" name="CP_RB_RPTR_BV" variants="A8XX-"/> + <reg64 offset="0x080b" name="CP_RB_BASE_LPAC" variants="A8XX-"/> + <reg32 offset="0x080d" name="CP_RB_CNTL_LPAC" variants="A8XX-"/> + <reg32 offset="0x080e" name="CP_RB_RPTR_WR_LPAC" variants="A8XX-"/> + <reg64 offset="0x080f" name="CP_RB_RPTR_ADDR_LPAC" variants="A8XX-"/> + <reg32 offset="0x0811" name="CP_RB_RPTR_LPAC" variants="A8XX-"/> + <reg32 offset="0x0812" name="CP_RB_WPTR_LPAC" variants="A8XX-"/> + <reg32 offset="0x0814" name="CP_SMMU_STREAM_ID_LPAC" variants="A8XX-"/> + <reg32 offset="0x0808" name="CP_SQE_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x0815" name="CP_SQE_CNTL" variants="A8XX-"/> + <reg64 offset="0x0816" name="CP_SQE_INSTR_BASE" variants="A8XX-"/> + <reg64 offset="0x0818" name="CP_AQE_INSTR_BASE_0" variants="A8XX-"/> + <reg64 offset="0x081a" name="CP_AQE_INSTR_BASE_1" variants="A8XX-"/> + <reg32 offset="0x0812" name="CP_CP2GMU_STATUS" variants="A6XX-A7XX"> + <!-- Note, layout defined by microcode --> + <bitfield name="IFPC" pos="0" type="boolean"/> + </reg32> + <reg32 offset="0x0822" name="CP_CP2GMU_STATUS" variants="A8XX-"> <bitfield name="IFPC" pos="0" type="boolean"/> </reg32> - <reg32 offset="0x0821" name="CP_HW_FAULT"/> - <reg32 offset="0x0823" name="CP_INTERRUPT_STATUS" type="A6XX_CP_INT"/> - <reg32 offset="0x0824" name="CP_PROTECT_STATUS"/> - <reg32 offset="0x0825" name="CP_STATUS_1"/> - <reg64 offset="0x0830" name="CP_SQE_INSTR_BASE"/> - <reg32 offset="0x0840" name="CP_MISC_CNTL"/> - <reg32 offset="0x0844" name="CP_APRIV_CNTL"> + <reg32 offset="0x0821" name="CP_HW_FAULT" variants="A6XX-A7XX"/> + <reg32 offset="0x0823" name="CP_INTERRUPT_STATUS" type="A6XX_CP_INT" variants="A6XX-A7XX"/> + + <bitset name="a6xx_cp_protect_status" inline="yes"> + <bitfield name="ADDR" low="0" high="17"/> + <bitfield name="READ" pos="20" type="boolean"/> + <bitfield name="CP_HALTED" pos="21" type="boolean"/> + <bitfield name="ACCESS_VIOLATION" pos="22" type="boolean"/> + </bitset> + + <reg32 offset="0x0824" name="CP_PROTECT_STATUS" type="a6xx_cp_protect_status" variants="A6XX-A7XX"/> + <reg32 offset="0x084f" name="CP_PROTECT_STATUS_PIPE" type="a6xx_cp_protect_status" variants="A8XX-"/> + <reg32 offset="0x0825" name="CP_STATUS_1" variants="A6XX-A7XX"/> + + <reg32 offset="0x0825" name="CP_SEMAPHORE_REG_0" variants="A8XX-"/> + <array offset="0x082a" name="CP_SCRATCH_GLOBAL" stride="1" length="4" variants="A8XX-"> + <reg32 offset="0x0" name="REG"/> + </array> + <array offset="0x0830" name="CP_SCRATCH_PIPE" stride="1" length="5" variants="A8XX-"> + <reg32 offset="0x0" name="REG"/> + </array> + + <reg32 offset="0x0840" name="CP_RL_ERROR_DETAILS_0" variants="A8XX-"/> + <reg32 offset="0x0841" name="CP_RL_ERROR_DETAILS_1" variants="A8XX-"/> + + <reg64 offset="0x0830" name="CP_SQE_INSTR_BASE" variants="A6XX-A7XX"/> + <reg32 offset="0x0840" name="CP_MISC_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x084c" name="CP_MISC_CNTL" variants="A8XX-"/> + + <reg32 offset="0x08b0" name="CP_SQE_ICACHE_CNTL_PIPE" variants="A8XX-"/> + <reg32 offset="0x08b1" name="CP_SQE_DCACHE_CNTL_PIPE" variants="A8XX-"/> + <reg32 
offset="0x08b3" name="CP_HW_FAULT_STATUS_PIPE" variants="A8XX-"/> + <reg32 offset="0x08b4" name="CP_HW_FAULT_STATUS_MASK_PIPE" variants="A8XX-"/> + <reg32 offset="0x08b5" name="CP_INTERRUPT_STATUS_GLOBAL" type="A8XX_CP_GLOBAL_INT_MASK" variants="A8XX-"/> + <reg32 offset="0x08b6" name="CP_INTERRUPT_STATUS_MASK_GLOBAL" type="A8XX_CP_GLOBAL_INT_MASK" variants="A8XX-"/> + <reg32 offset="0x08b7" name="CP_INTERRUPT_STATUS_PIPE" type="A8XX_CP_INTERRUPT_STATUS_MASK_PIPE" variants="A8XX-"/> + <reg32 offset="0x08b8" name="CP_INTERRUPT_STATUS_MASK_PIPE" variants="A8XX-"/> + <reg32 offset="0x08b9" name="CP_PIPE_STATUS_PIPE" variants="A8XX-"/> + <reg32 offset="0x08ba" name="CP_GPU_BATCH_ID_PIPE" variants="A8XX-"/> + <reg32 offset="0x08bb" name="CP_SQE_STATUS_PIPE" variants="A8XX-"/> + + <bitset name="a6xx_cp_apriv_cntl" inline="yes"> <!-- Crashdumper writes --> <bitfield pos="6" name="CDWRITE" type="boolean"/> <!-- Crashdumper reads --> <bitfield pos="5" name="CDREAD" type="boolean"/> - - <!-- 4 is unknown --> - + <!-- CP Scratch reg copy to mem --> + <bitfield pos="4" name="SCRATCHWT" type="boolean"/> <!-- RPTR shadow writes --> <bitfield pos="3" name="RBRPWB" type="boolean"/> <!-- Memory accesses from PM4 packets in the ringbuffer --> @@ -115,11 +222,16 @@ by a particular renderpass/blit. <bitfield pos="1" name="RBFETCH" type="boolean"/> <!-- Instruction cache fetches --> <bitfield pos="0" name="ICACHE" type="boolean"/> - </reg32> + </bitset> + + <reg32 offset="0x0844" name="CP_APRIV_CNTL" type="a6xx_cp_apriv_cntl" variants="A6XX-A7XX"/> + <reg32 offset="0x084d" name="CP_APRIV_CNTL_PIPE" type="a6xx_cp_apriv_cntl" variants="A8XX-"/> + <!-- Preemptions taking longer than this threshold increment PERF_CP_LONG_PREEMPTIONS: --> - <reg32 offset="0x08C0" name="CP_PREEMPT_THRESHOLD"/> + <reg32 offset="0x08c0" name="CP_PREEMPT_THRESHOLD" variants="A6XX-A7XX"/> + <reg32 offset="0x08ec" name="CP_PREEMPT_THRESHOLD" variants="A8XX-"/> <!-- all the threshold values seem to be in units of quad-dwords: --> - <reg32 offset="0x08C1" name="CP_ROQ_THRESHOLDS_1"> + <reg32 offset="0x08c1" name="CP_ROQ_THRESHOLDS_1" variants="A6XX"> <doc> b0..7 identifies where MRB data starts (and RB data ends) b8.15 identifies where VSD data starts (and MRB data ends) @@ -131,7 +243,7 @@ by a particular renderpass/blit. <bitfield name="IB1_START" low="16" high="23" shr="2"/> <bitfield name="IB2_START" low="24" high="31" shr="2"/> </reg32> - <reg32 offset="0x08C2" name="CP_ROQ_THRESHOLDS_2"> + <reg32 offset="0x08c2" name="CP_ROQ_THRESHOLDS_2" variants="A6XX"> <doc> low bits identify where CP_SET_DRAW_STATE stateobj processing starts (and IB2 data ends). I'm guessing @@ -147,176 +259,293 @@ by a particular renderpass/blit. 
<!-- total ROQ size: --> <bitfield name="ROQ_SIZE" low="16" high="31" shr="2"/> </reg32> - <reg32 offset="0x08C3" name="CP_MEM_POOL_SIZE"/> - <reg32 offset="0x0841" name="CP_CHICKEN_DBG"/> - <reg32 offset="0x0842" name="CP_ADDR_MODE_CNTL" type="a5xx_address_mode"/> - <reg32 offset="0x0843" name="CP_DBG_ECO_CNTL"/> - <reg32 offset="0x084F" name="CP_PROTECT_CNTL"> + <reg32 offset="0x08C3" name="CP_MEM_POOL_SIZE" variants="A6XX"/> + <reg32 offset="0x0841" name="CP_CHICKEN_DBG" variants="A6XX-A7XX"/> + <reg32 offset="0x08b2" name="CP_CHICKEN_DBG_PIPE" variants="A8XX-"/> + <reg32 offset="0x0842" name="CP_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/> + <reg32 offset="0x0843" name="CP_DBG_ECO_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x084b" name="CP_DBG_ECO_CNTL" variants="A8XX-"/> + + <bitset name="a6xx_cp_protect_cntl" inline="yes"> <bitfield pos="3" name="LAST_SPAN_INF_RANGE" type="boolean"/> <bitfield pos="1" name="ACCESS_FAULT_ON_VIOL_EN" type="boolean"/> <bitfield pos="0" name="ACCESS_PROT_EN" type="boolean"/> - </reg32> + </bitset> - <array offset="0x0883" name="CP_SCRATCH" stride="1" length="8"> + <reg32 offset="0x084f" name="CP_PROTECT_CNTL" type="a6xx_cp_protect_cntl" variants="A6XX-A7XX"/> + <bitset name="a8xx_cp_protect_cntl" inline="yes"> + <bitfield name="HALT_SQE_RANGE" low="16" high="31"/> + <bitfield name="LAST_SPAN_INF_RANGE" pos="3" type="boolean"/> + <bitfield name="ACCESS_FAULT_ON_VIOL_EN" pos="1" type="boolean"/> + <bitfield name="ACCESS_PROT_EN" pos="0" type="boolean"/> + </bitset> + + <reg32 offset="0x084e" name="CP_PROTECT_CNTL_PIPE" type="a8xx_cp_protect_cntl" variants="A8XX-"/> + + <array offset="0x0883" name="CP_SCRATCH" stride="1" length="8" variants="A6XX-A7XX"> <reg32 offset="0x0" name="REG" type="uint"/> </array> - <array offset="0x0850" name="CP_PROTECT" stride="1" length="32"> + <array offset="0x0850" name="CP_PROTECT" stride="1" length="32" variants="A6XX-A7XX"> + <reg32 offset="0x0" name="REG" type="a6x_cp_protect"/> + </array> + <array offset="0x0850" name="CP_PROTECT_GLOBAL" stride="1" length="64" variants="A8XX-"> + <reg32 offset="0x0" name="REG" type="a6x_cp_protect"/> + </array> + <array offset="0x08a0" name="CP_PROTECT_PIPE" stride="1" length="16" variants="A8XX-"> <reg32 offset="0x0" name="REG" type="a6x_cp_protect"/> </array> - <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"> + <bitset name="a6xx_cp_context_switch_cntl" inline="yes"> <bitfield name="STOP" pos="0" type="boolean"/> <bitfield name="LEVEL" low="6" high="7"/> <bitfield name="USES_GMEM" pos="8" type="boolean"/> <bitfield name="SKIP_SAVE_RESTORE" pos="9" type="boolean"/> - </reg32> - <reg64 offset="0x08A1" name="CP_CONTEXT_SWITCH_SMMU_INFO"/> - <reg64 offset="0x08A3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR"/> - <reg64 offset="0x08A5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR"/> - <reg64 offset="0x08A7" name="CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR"/> - <reg32 offset="0x08ab" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A7XX-"/> - <array offset="0x08D0" name="CP_PERFCTR_CP_SEL" stride="1" length="14"/> - <array offset="0x08e0" name="CP_BV_PERFCTR_CP_SEL" stride="1" length="7" variants="A7XX-"/> - <reg64 offset="0x0900" name="CP_CRASH_DUMP_SCRIPT_BASE"/> - <reg32 offset="0x0902" name="CP_CRASH_DUMP_CNTL"/> - <reg32 offset="0x0903" name="CP_CRASH_DUMP_STATUS"/> - <reg32 offset="0x0908" name="CP_SQE_STAT_ADDR"/> - <reg32 offset="0x0909" name="CP_SQE_STAT_DATA"/> - <reg32 offset="0x090A" name="CP_DRAW_STATE_ADDR"/> - <reg32 offset="0x090B" 
name="CP_DRAW_STATE_DATA"/> - <reg32 offset="0x090C" name="CP_ROQ_DBG_ADDR"/> - <reg32 offset="0x090D" name="CP_ROQ_DBG_DATA"/> - <reg32 offset="0x090E" name="CP_MEM_POOL_DBG_ADDR"/> - <reg32 offset="0x090F" name="CP_MEM_POOL_DBG_DATA"/> - <reg32 offset="0x0910" name="CP_SQE_UCODE_DBG_ADDR"/> - <reg32 offset="0x0911" name="CP_SQE_UCODE_DBG_DATA"/> - <reg64 offset="0x0928" name="CP_IB1_BASE"/> - <reg32 offset="0x092A" name="CP_IB1_REM_SIZE"/> - <reg64 offset="0x092B" name="CP_IB2_BASE"/> - <reg32 offset="0x092D" name="CP_IB2_REM_SIZE"/> + </bitset> + + <reg32 offset="0x08a0" name="CP_CONTEXT_SWITCH_CNTL" type="a6xx_cp_context_switch_cntl" variants="A6XX-A7XX"/> + <reg32 offset="0x08c0" name="CP_CONTEXT_SWITCH_CNTL" type="a6xx_cp_context_switch_cntl" variants="A8XX-"/> + + <reg64 offset="0x08a1" name="CP_CONTEXT_SWITCH_SMMU_INFO" variants="A6XX-A7XX"/> + <reg64 offset="0x08a3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR" variants="A6XX-A7XX"/> + <reg64 offset="0x08a5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR" variants="A6XX-A7XX"/> + <reg64 offset="0x08a7" name="CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR" variants="A6XX-A7XX"/> + <reg32 offset="0x08ab" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A7XX"/> + + <reg64 offset="0x08c1" name="CP_CONTEXT_SWITCH_SMMU_INFO" variants="A8XX-"/> + <reg64 offset="0x08c3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR" variants="A8XX-"/> + <reg64 offset="0x08c5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR" variants="A8XX-"/> + <reg64 offset="0x08c7" name="CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR" variants="A8XX-"/> + <reg32 offset="0x08cb" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A8XX-"/> + + <array offset="0x08d0" name="CP_PERFCTR_CP_SEL" stride="1" length="14" variants="A6XX-A7XX"/> + <array offset="0x08d0" name="CP_PERFCTR_CP_SEL" stride="1" length="21" variants="A8XX-"/> + <array offset="0x08e0" name="CP_BV_PERFCTR_CP_SEL" stride="1" length="7" variants="A7XX"/> + <reg64 offset="0x0900" name="CP_CRASH_DUMP_SCRIPT_BASE" variants="A6XX-A7XX"/> + <reg32 offset="0x0902" name="CP_CRASH_DUMP_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x0903" name="CP_CRASH_DUMP_STATUS" variants="A6XX-A7XX"/> + <reg64 offset="0x0842" name="CP_CRASH_DUMP_SCRIPT_BASE" variants="A8XX-"/> + <reg32 offset="0x0844" name="CP_CRASH_DUMP_CNTL" variants="A8XX-"/> + <reg32 offset="0x0845" name="CP_CRASH_DUMP_STATUS" variants="A8XX-"/> + <reg32 offset="0x0908" name="CP_SQE_STAT_ADDR" variants="A6XX-A7XX"/> + <reg32 offset="0x0909" name="CP_SQE_STAT_DATA" variants="A6XX-A7XX"/> + <reg32 offset="0x090a" name="CP_DRAW_STATE_ADDR" variants="A6XX-A7XX"/> + <reg32 offset="0x090b" name="CP_DRAW_STATE_DATA" variants="A6XX-A7XX"/> + <reg32 offset="0x090c" name="CP_ROQ_DBG_ADDR" variants="A6XX-A7XX"/> + <reg32 offset="0x090d" name="CP_ROQ_DBG_DATA" variants="A6XX-A7XX"/> + <reg32 offset="0x090e" name="CP_MEM_POOL_DBG_ADDR" variants="A6XX-A7XX"/> + <reg32 offset="0x090f" name="CP_MEM_POOL_DBG_DATA" variants="A6XX-A7XX"/> + <reg32 offset="0x0910" name="CP_SQE_UCODE_DBG_ADDR" variants="A6XX-A7XX"/> + <reg32 offset="0x0911" name="CP_SQE_UCODE_DBG_DATA" variants="A6XX-A7XX"/> + + <reg32 offset="0x08f0" name="CP_SQE_STAT_ADDR_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f1" name="CP_SQE_STAT_DATA_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f2" name="CP_DRAW_STATE_ADDR_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f3" name="CP_DRAW_STATE_DATA_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f4" name="CP_ROQ_DBG_ADDR_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f5" 
name="CP_ROQ_DBG_DATA_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f6" name="CP_MEM_POOL_DBG_ADDR_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f7" name="CP_MEM_POOL_DBG_DATA_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f8" name="CP_SQE_UCODE_DBG_ADDR_PIPE" variants="A8XX-"/> + <reg32 offset="0x08f9" name="CP_SQE_UCODE_DBG_DATA_PIPE" variants="A8XX-"/> + <reg32 offset="0x08fa" name="CP_RESOURCE_TABLE_DBG_ADDR_BV" variants="A8XX-"/> + <reg32 offset="0x08fb" name="CP_RESOURCE_TABLE_DBG_DATA_BV" variants="A8XX-"/> + <reg32 offset="0x08fc" name="CP_FIFO_DBG_ADDR_LPAC" variants="A8XX-"/> + <reg32 offset="0x08fd" name="CP_FIFO_DBG_DATA_LPAC" variants="A8XX-"/> + <reg32 offset="0x08fe" name="CP_FIFO_DBG_ADDR_DDE_PIPE" variants="A8XX-"/> + <reg32 offset="0x08ff" name="CP_FIFO_DBG_DATA_DDE_PIPE" variants="A8XX-"/> + + <reg32 offset="0x0b00" name="CP_SLICE_MEM_POOL_DBG_ADDR_PIPE" variants="A8XX-"/> + <reg32 offset="0x0b01" name="CP_SLICE_MEM_POOL_DBG_DATA_PIPE" variants="A8XX-"/> + <reg32 offset="0x0b93" name="CP_SLICE_CHICKEN_DBG_PIPE" variants="A8XX-"/> + + <reg64 offset="0x0928" name="CP_IB1_BASE" variants="A6XX-A7XX"/> + <reg32 offset="0x092a" name="CP_IB1_REM_SIZE" variants="A6XX-A7XX"/> + <reg64 offset="0x092b" name="CP_IB2_BASE" variants="A6XX-A7XX"/> + <reg32 offset="0x092d" name="CP_IB2_REM_SIZE" variants="A6XX-A7XX"/> <!-- SDS == CP_SET_DRAW_STATE: --> - <reg64 offset="0x092e" name="CP_SDS_BASE"/> - <reg32 offset="0x0930" name="CP_SDS_REM_SIZE"/> + <reg64 offset="0x092e" name="CP_SDS_BASE" variants="A6XX-A7XX"/> + <reg32 offset="0x0930" name="CP_SDS_REM_SIZE" variants="A6XX-A7XX"/> <!-- MRB == MEM_READ_ADDR/$addr in SQE firmware --> - <reg64 offset="0x0931" name="CP_MRB_BASE"/> - <reg32 offset="0x0933" name="CP_MRB_REM_SIZE"/> + <reg64 offset="0x0931" name="CP_MRB_BASE" variants="A6XX-A7XX"/> + <reg32 offset="0x0933" name="CP_MRB_REM_SIZE" variants="A6XX-A7XX"/> <!-- VSD == Visibility Stream Decode This is used by CP to read the draw stream and skip empty draws --> - <reg64 offset="0x0934" name="CP_VSD_BASE"/> + <reg64 offset="0x0934" name="CP_VSD_BASE" variants="A6XX-A7XX"/> + + <reg64 offset="0x0900" name="CP_IB1_BASE" variants="A8XX-"/> + <reg32 offset="0x0902" name="CP_IB1_REM_SIZE" variants="A8XX-"/> + <reg32 offset="0x0903" name="CP_IB1_INIT_SIZE" variants="A8XX-"/> + <reg64 offset="0x0904" name="CP_IB2_BASE" variants="A8XX-"/> + <reg32 offset="0x0906" name="CP_IB2_REM_SIZE" variants="A8XX-"/> + <reg32 offset="0x0907" name="CP_IB2_INIT_SIZE" variants="A8XX-"/> + <reg64 offset="0x0908" name="CP_IB3_BASE" variants="A8XX-"/> + <reg32 offset="0x090a" name="CP_IB3_REM_SIZE" variants="A8XX-"/> + <reg32 offset="0x090b" name="CP_IB3_INIT_SIZE" variants="A8XX-"/> + <reg64 offset="0x090c" name="CP_SDS_BASE" variants="A8XX-"/> + <reg32 offset="0x090e" name="CP_SDS_REM_SIZE" variants="A8XX-"/> + <reg32 offset="0x090f" name="CP_SDS_INIT_SIZE" variants="A8XX-"/> + <reg64 offset="0x0910" name="CP_MRB_BASE" variants="A8XX-"/> + <reg32 offset="0x0912" name="CP_MRB_REM_SIZE" variants="A8XX-"/> + <reg32 offset="0x0913" name="CP_MRB_INIT_SIZE" variants="A8XX-"/> + <reg64 offset="0x0914" name="CP_VSD_BASE" variants="A8XX-"/> + <reg32 offset="0x0916" name="CP_VSD_REM_SIZE" variants="A8XX-"/> + <reg32 offset="0x0917" name="CP_VSD_INIT_SIZE" variants="A8XX-"/> <bitset name="a6xx_roq_status" inline="yes"> <bitfield name="RPTR" low="0" high="9"/> <bitfield name="WPTR" low="16" high="25"/> </bitset> - <reg32 offset="0x0939" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status"/> - <reg32 offset="0x093a" 
name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status"/> - <reg32 offset="0x093b" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status"/> - <reg32 offset="0x093c" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status"/> - <reg32 offset="0x093d" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status"/> - <reg32 offset="0x093e" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status"/> - - <reg32 offset="0x0943" name="CP_IB1_INIT_SIZE"/> - <reg32 offset="0x0944" name="CP_IB2_INIT_SIZE"/> - <reg32 offset="0x0945" name="CP_SDS_INIT_SIZE"/> - <reg32 offset="0x0946" name="CP_MRB_INIT_SIZE"/> - <reg32 offset="0x0947" name="CP_VSD_INIT_SIZE"/> - - <reg32 offset="0x0948" name="CP_ROQ_AVAIL_RB"> + <reg32 offset="0x0939" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/> + <reg32 offset="0x093a" name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/> + <reg32 offset="0x093b" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/> + <reg32 offset="0x093c" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/> + <reg32 offset="0x093d" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/> + <reg32 offset="0x093e" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/> + + <reg32 offset="0x0920" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status" variants="A8XX-"/> + <reg32 offset="0x0921" name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status" variants="A8XX-"/> + <reg32 offset="0x0922" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status" variants="A8XX-"/> + <reg32 offset="0x0923" name="CP_ROQ_IB3_STATUS" type="a6xx_roq_status" variants="A8XX-"/> + <reg32 offset="0x0924" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status" variants="A8XX-"/> + <reg32 offset="0x0925" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status" variants="A8XX-"/> + <reg32 offset="0x0926" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status" variants="A8XX-"/> + + <reg32 offset="0x0943" name="CP_IB1_INIT_SIZE" variants="A6XX-A7XX"/> + <reg32 offset="0x0944" name="CP_IB2_INIT_SIZE" variants="A6XX-A7XX"/> + <reg32 offset="0x0945" name="CP_SDS_INIT_SIZE" variants="A6XX-A7XX"/> + <reg32 offset="0x0946" name="CP_MRB_INIT_SIZE" variants="A6XX-A7XX"/> + <reg32 offset="0x0947" name="CP_VSD_INIT_SIZE" variants="A6XX-A7XX"/> + + <bitset name="a6xx_cp_roq_avail" inline="yes"> <doc>number of remaining dwords incl current dword being consumed?</doc> <bitfield name="REM" low="16" high="31"/> - </reg32> - <reg32 offset="0x0949" name="CP_ROQ_AVAIL_IB1"> - <doc>number of remaining dwords incl current dword being consumed?</doc> - <bitfield name="REM" low="16" high="31"/> - </reg32> - <reg32 offset="0x094a" name="CP_ROQ_AVAIL_IB2"> + </bitset> + + <reg32 offset="0x0948" name="CP_ROQ_AVAIL_RB" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/> + <reg32 offset="0x0949" name="CP_ROQ_AVAIL_IB1" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/> + <reg32 offset="0x094a" name="CP_ROQ_AVAIL_IB2" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/> + <reg32 offset="0x094b" name="CP_ROQ_AVAIL_SDS" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/> + <reg32 offset="0x094c" name="CP_ROQ_AVAIL_MRB" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/> + <reg32 offset="0x094d" name="CP_ROQ_AVAIL_VSD" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/> + + <reg32 offset="0x0918" name="CP_ROQ_AVAIL_RB" type="a6xx_cp_roq_avail" variants="A8XX-"/> + <reg32 offset="0x0919" name="CP_ROQ_AVAIL_IB1" type="a6xx_cp_roq_avail" variants="A8XX-"/> + <reg32 offset="0x091a" name="CP_ROQ_AVAIL_IB2" type="a6xx_cp_roq_avail" variants="A8XX-"/> + <reg32 offset="0x091b" 
name="CP_ROQ_AVAIL_IB3" type="a6xx_cp_roq_avail" variants="A8XX-"/> + <reg32 offset="0x091c" name="CP_ROQ_AVAIL_SDS" type="a6xx_cp_roq_avail" variants="A8XX-"/> + <reg32 offset="0x091d" name="CP_ROQ_AVAIL_MRB" type="a6xx_cp_roq_avail" variants="A8XX-"/> + <reg32 offset="0x091e" name="CP_ROQ_AVAIL_VSD" type="a6xx_cp_roq_avail" variants="A8XX-"/> + + <bitset name="a7xx_aperture_cntl" inline="yes"> + <bitfield name="PIPE" low="12" high="13" type="adreno_pipe"/> + <bitfield name="CLUSTER" low="8" high="10" type="a7xx_cluster"/> + <bitfield name="CONTEXT" low="4" high="5"/> + </bitset> + <reg64 offset="0x0980" name="CP_ALWAYS_ON_COUNTER" variants="A6XX-A7XX"/> + <reg64 offset="0x0982" name="CP_ALWAYS_ON_CONTEXT" variants="A6XX-A7XX"/> + <reg64 offset="0x08e7" name="CP_ALWAYS_ON_COUNTER" variants="A8XX-"/> + <reg64 offset="0x08e9" name="CP_ALWAYS_ON_CONTEXT" variants="A8XX-"/> + <reg32 offset="0x098d" name="CP_AHB_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x0838" name="CP_AHB_CNTL" variants="A8XX-"/> + <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" variants="A6XX"/> + <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" type="a7xx_aperture_cntl" variants="A7XX"/> + <reg32 offset="0x0A01" name="CP_APERTURE_CNTL_SQE" variants="A6XX"/> + <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" variants="A6XX"/> + <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" type="a7xx_aperture_cntl" variants="A7XX"/> + + <array offset="0x0a9c" name="CP_RESERVED_REG" stride="1" length="4" variants="A7XX"/> + <array offset="0x0958" name="CP_RESERVED_REG" stride="1" length="4" variants="A8XX-"/> + + <bitset name="a8xx_aperture_cntl" inline="yes"> + <bitfield name="CONTEXTID3D" low="4" high="5"/> + <bitfield name="CLUSTERID" low="8" high="11"/> + <bitfield name="PIPEID" low="12" high="15"/> + <bitfield name="SLICEID" low="16" high="18"/> + <bitfield name="USESLICEID" pos="23" type="boolean"/> + </bitset> + + <reg32 offset="0x081c" name="CP_APERTURE_CNTL_HOST" type="a8xx_aperture_cntl" variants="A8XX-"/> + <reg32 offset="0x081d" name="CP_APERTURE_CNTL_GMU" type="a8xx_aperture_cntl" variants="A8XX-"/> + <reg32 offset="0x081e" name="CP_APERTURE_CNTL_CD" type="a8xx_aperture_cntl" variants="A8XX-"/> + + <reg32 offset="0x0a61" name="CP_BV_PROTECT_STATUS" variants="A7XX"/> + <reg32 offset="0x0a64" name="CP_BV_HW_FAULT" variants="A7XX"/> + <reg32 offset="0x0a66" name="CP_BV_RB_RPTR" variants="A7XX"/> + <reg64 offset="0x0a6d" name="CP_BV_IB1_BASE" variants="A7XX"/> + <reg32 offset="0x0a70" name="CP_BV_IB1_REM_SIZE" variants="A7XX"/> + <reg64 offset="0x0a71" name="CP_BV_IB2_BASE" variants="A7XX"/> + <reg32 offset="0x0a74" name="CP_BV_IB2_REM_SIZE" variants="A7XX"/> + <reg32 offset="0x0a81" name="CP_BV_DRAW_STATE_ADDR" variants="A7XX"/> + <reg32 offset="0x0a82" name="CP_BV_DRAW_STATE_DATA" variants="A7XX"/> + <reg32 offset="0x0a83" name="CP_BV_ROQ_DBG_ADDR" variants="A7XX"/> + <reg32 offset="0x0a84" name="CP_BV_ROQ_DBG_DATA" variants="A7XX"/> + <reg32 offset="0x0a85" name="CP_BV_SQE_UCODE_DBG_ADDR" variants="A7XX"/> + <reg32 offset="0x0a86" name="CP_BV_SQE_UCODE_DBG_DATA" variants="A7XX"/> + <reg32 offset="0x0a87" name="CP_BV_SQE_STAT_ADDR" variants="A7XX"/> + <reg32 offset="0x0a88" name="CP_BV_SQE_STAT_DATA" variants="A7XX"/> + + <reg32 offset="0x0a8f" name="CP_BV_ROQ_AVAIL_RB" variants="A7XX"> <doc>number of remaining dwords incl current dword being consumed?</doc> <bitfield name="REM" low="16" high="31"/> </reg32> - <reg32 offset="0x094b" name="CP_ROQ_AVAIL_SDS"> + <reg32 offset="0x0a90" name="CP_BV_ROQ_AVAIL_IB1" 
variants="A7XX"> <doc>number of remaining dwords incl current dword being consumed?</doc> <bitfield name="REM" low="16" high="31"/> </reg32> - <reg32 offset="0x094c" name="CP_ROQ_AVAIL_MRB"> - <doc>number of dwords that have already been read but haven't been consumed by $addr</doc> - <bitfield name="REM" low="16" high="31"/> - </reg32> - <reg32 offset="0x094d" name="CP_ROQ_AVAIL_VSD"> + <reg32 offset="0x0a91" name="CP_BV_ROQ_AVAIL_IB2" variants="A7XX"> <doc>number of remaining dwords incl current dword being consumed?</doc> <bitfield name="REM" low="16" high="31"/> </reg32> - <bitset name="a7xx_aperture_cntl" inline="yes"> - <bitfield name="PIPE" low="12" high="13" type="a7xx_pipe"/> - <bitfield name="CLUSTER" low="8" high="10" type="a7xx_cluster"/> - <bitfield name="CONTEXT" low="4" high="5"/> - </bitset> - <reg64 offset="0x0980" name="CP_ALWAYS_ON_COUNTER"/> - <reg32 offset="0x098D" name="CP_AHB_CNTL"/> - <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" variants="A6XX"/> - <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" type="a7xx_aperture_cntl" variants="A7XX-"/> - <reg32 offset="0x0A01" name="CP_APERTURE_CNTL_SQE" variants="A6XX"/> - <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" variants="A6XX"/> - <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" type="a7xx_aperture_cntl" variants="A7XX-"/> - - <reg32 offset="0x0a61" name="CP_BV_PROTECT_STATUS" variants="A7XX-"/> - <reg32 offset="0x0a64" name="CP_BV_HW_FAULT" variants="A7XX-"/> - <reg32 offset="0x0a81" name="CP_BV_DRAW_STATE_ADDR" variants="A7XX-"/> - <reg32 offset="0x0a82" name="CP_BV_DRAW_STATE_DATA" variants="A7XX-"/> - <reg32 offset="0x0a83" name="CP_BV_ROQ_DBG_ADDR" variants="A7XX-"/> - <reg32 offset="0x0a84" name="CP_BV_ROQ_DBG_DATA" variants="A7XX-"/> - <reg32 offset="0x0a85" name="CP_BV_SQE_UCODE_DBG_ADDR" variants="A7XX-"/> - <reg32 offset="0x0a86" name="CP_BV_SQE_UCODE_DBG_DATA" variants="A7XX-"/> - <reg32 offset="0x0a87" name="CP_BV_SQE_STAT_ADDR" variants="A7XX-"/> - <reg32 offset="0x0a88" name="CP_BV_SQE_STAT_DATA" variants="A7XX-"/> - <reg32 offset="0x0a96" name="CP_BV_MEM_POOL_DBG_ADDR" variants="A7XX-"/> - <reg32 offset="0x0a97" name="CP_BV_MEM_POOL_DBG_DATA" variants="A7XX-"/> - <reg64 offset="0x0a98" name="CP_BV_RB_RPTR_ADDR" variants="A7XX-"/> - - <reg32 offset="0x0a9a" name="CP_RESOURCE_TABLE_DBG_ADDR" variants="A7XX-"/> - <reg32 offset="0x0a9b" name="CP_RESOURCE_TABLE_DBG_DATA" variants="A7XX-"/> - <reg32 offset="0x0ad0" name="CP_BV_APRIV_CNTL" variants="A7XX-"/> - <reg32 offset="0x0ada" name="CP_BV_CHICKEN_DBG" variants="A7XX-"/> - - <reg32 offset="0x0b0a" name="CP_LPAC_DRAW_STATE_ADDR" variants="A7XX-"/> - <reg32 offset="0x0b0b" name="CP_LPAC_DRAW_STATE_DATA" variants="A7XX-"/> - <reg32 offset="0x0b0c" name="CP_LPAC_ROQ_DBG_ADDR" variants="A7XX-"/> - <reg32 offset="0x0b27" name="CP_SQE_AC_UCODE_DBG_ADDR" variants="A7XX-"/> - <reg32 offset="0x0b28" name="CP_SQE_AC_UCODE_DBG_DATA" variants="A7XX-"/> - <reg32 offset="0x0b29" name="CP_SQE_AC_STAT_ADDR" variants="A7XX-"/> - <reg32 offset="0x0b2a" name="CP_SQE_AC_STAT_DATA" variants="A7XX-"/> - - <reg32 offset="0x0b31" name="CP_LPAC_APRIV_CNTL" variants="A7XX-"/> - <reg32 offset="0x0B34" name="CP_LPAC_PROG_FIFO_SIZE"/> - <reg32 offset="0x0b35" name="CP_LPAC_ROQ_DBG_DATA" variants="A7XX-"/> - <reg32 offset="0x0b36" name="CP_LPAC_FIFO_DBG_DATA" variants="A7XX-"/> - <reg32 offset="0x0b40" name="CP_LPAC_FIFO_DBG_ADDR" variants="A7XX-"/> - <reg32 offset="0x0b81" name="CP_LPAC_SQE_CNTL"/> - <reg64 offset="0x0b82" name="CP_LPAC_SQE_INSTR_BASE"/> - - <reg64 
offset="0x0b70" name="CP_AQE_INSTR_BASE_0" variants="A7XX-"/> - <reg64 offset="0x0b72" name="CP_AQE_INSTR_BASE_1" variants="A7XX-"/> - <reg32 offset="0x0b78" name="CP_AQE_APRIV_CNTL" variants="A7XX-"/> - - <reg32 offset="0x0ba8" name="CP_AQE_ROQ_DBG_ADDR_0" variants="A7XX-"/> - <reg32 offset="0x0ba9" name="CP_AQE_ROQ_DBG_ADDR_1" variants="A7XX-"/> - <reg32 offset="0x0bac" name="CP_AQE_ROQ_DBG_DATA_0" variants="A7XX-"/> - <reg32 offset="0x0bad" name="CP_AQE_ROQ_DBG_DATA_1" variants="A7XX-"/> - <reg32 offset="0x0bb0" name="CP_AQE_UCODE_DBG_ADDR_0" variants="A7XX-"/> - <reg32 offset="0x0bb1" name="CP_AQE_UCODE_DBG_ADDR_1" variants="A7XX-"/> - <reg32 offset="0x0bb4" name="CP_AQE_UCODE_DBG_DATA_0" variants="A7XX-"/> - <reg32 offset="0x0bb5" name="CP_AQE_UCODE_DBG_DATA_1" variants="A7XX-"/> - <reg32 offset="0x0bb8" name="CP_AQE_STAT_ADDR_0" variants="A7XX-"/> - <reg32 offset="0x0bb9" name="CP_AQE_STAT_ADDR_1" variants="A7XX-"/> - <reg32 offset="0x0bbc" name="CP_AQE_STAT_DATA_0" variants="A7XX-"/> - <reg32 offset="0x0bbd" name="CP_AQE_STAT_DATA_1" variants="A7XX-"/> - - <reg32 offset="0x0C01" name="VSC_ADDR_MODE_CNTL" type="a5xx_address_mode"/> - <reg32 offset="0x0018" name="RBBM_GPR0_CNTL"/> - <reg32 offset="0x0201" name="RBBM_INT_0_STATUS" type="A6XX_RBBM_INT_0_MASK"/> - <reg32 offset="0x0210" name="RBBM_STATUS"> + <reg32 offset="0x0a96" name="CP_BV_MEM_POOL_DBG_ADDR" variants="A7XX"/> + <reg32 offset="0x0a97" name="CP_BV_MEM_POOL_DBG_DATA" variants="A7XX"/> + <reg64 offset="0x0a98" name="CP_BV_RB_RPTR_ADDR" variants="A7XX"/> + + <reg32 offset="0x0a9a" name="CP_RESOURCE_TABLE_DBG_ADDR" variants="A7XX"/> + <reg32 offset="0x0a9b" name="CP_RESOURCE_TABLE_DBG_DATA" variants="A7XX"/> + <reg32 offset="0x0ad0" name="CP_BV_APRIV_CNTL" variants="A7XX"/> + <reg32 offset="0x0ada" name="CP_BV_CHICKEN_DBG" variants="A7XX"/> + + <reg32 offset="0x0b0a" name="CP_LPAC_DRAW_STATE_ADDR" variants="A7XX"/> + <reg32 offset="0x0b0b" name="CP_LPAC_DRAW_STATE_DATA" variants="A7XX"/> + <reg32 offset="0x0b0c" name="CP_LPAC_ROQ_DBG_ADDR" variants="A7XX"/> + <reg32 offset="0x0b27" name="CP_SQE_AC_UCODE_DBG_ADDR" variants="A7XX"/> + <reg32 offset="0x0b28" name="CP_SQE_AC_UCODE_DBG_DATA" variants="A7XX"/> + <reg32 offset="0x0b29" name="CP_SQE_AC_STAT_ADDR" variants="A7XX"/> + <reg32 offset="0x0b2a" name="CP_SQE_AC_STAT_DATA" variants="A7XX"/> + + <reg32 offset="0x0b31" name="CP_LPAC_APRIV_CNTL" variants="A7XX"/> + <reg32 offset="0x0b34" name="CP_LPAC_PROG_FIFO_SIZE" variants="A7XX"/> + <reg32 offset="0x0b35" name="CP_LPAC_ROQ_DBG_DATA" variants="A7XX"/> + <reg32 offset="0x0b36" name="CP_LPAC_FIFO_DBG_DATA" variants="A7XX"/> + <reg32 offset="0x0b40" name="CP_LPAC_FIFO_DBG_ADDR" variants="A7XX"/> + <reg32 offset="0x0b81" name="CP_LPAC_SQE_CNTL" variants="A6XX-A7XX"/> + <reg64 offset="0x0b82" name="CP_LPAC_SQE_INSTR_BASE" variants="A6XX-A7XX"/> + + <reg64 offset="0x0b70" name="CP_AQE_INSTR_BASE_0" variants="A7XX"/> + <reg64 offset="0x0b72" name="CP_AQE_INSTR_BASE_1" variants="A7XX"/> + <reg32 offset="0x0b78" name="CP_AQE_APRIV_CNTL" variants="A7XX"/> + + <reg32 offset="0x0ba8" name="CP_AQE_ROQ_DBG_ADDR_0" variants="A7XX"/> + <reg32 offset="0x0ba9" name="CP_AQE_ROQ_DBG_ADDR_1" variants="A7XX"/> + <reg32 offset="0x0bac" name="CP_AQE_ROQ_DBG_DATA_0" variants="A7XX"/> + <reg32 offset="0x0bad" name="CP_AQE_ROQ_DBG_DATA_1" variants="A7XX"/> + <reg32 offset="0x0bb0" name="CP_AQE_UCODE_DBG_ADDR_0" variants="A7XX"/> + <reg32 offset="0x0bb1" name="CP_AQE_UCODE_DBG_ADDR_1" variants="A7XX"/> + <reg32 offset="0x0bb4" 
name="CP_AQE_UCODE_DBG_DATA_0" variants="A7XX"/> + <reg32 offset="0x0bb5" name="CP_AQE_UCODE_DBG_DATA_1" variants="A7XX"/> + <reg32 offset="0x0bb8" name="CP_AQE_STAT_ADDR_0" variants="A7XX"/> + <reg32 offset="0x0bb9" name="CP_AQE_STAT_ADDR_1" variants="A7XX"/> + <reg32 offset="0x0bbc" name="CP_AQE_STAT_DATA_0" variants="A7XX"/> + <reg32 offset="0x0bbd" name="CP_AQE_STAT_DATA_1" variants="A7XX"/> + + <reg32 offset="0x0C01" name="VSC_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/> + <reg32 offset="0x0018" name="RBBM_GPR0_CNTL" variants="A6XX"/> + <reg32 offset="0x0201" name="RBBM_INT_0_STATUS" type="A6XX_RBBM_INT_0_MASK" variants="A6XX-A7XX"/> + <reg32 offset="0x006a" name="RBBM_INT_0_STATUS" type="A6XX_RBBM_INT_0_MASK" variants="A8XX-"/> + <reg32 offset="0x0210" name="RBBM_STATUS" variants="A6XX-A7XX"> <bitfield pos="23" name="GPU_BUSY_IGN_AHB" type="boolean"/> <bitfield pos="22" name="GPU_BUSY_IGN_AHB_CP" type="boolean"/> <bitfield pos="21" name="HLSQ_BUSY" type="boolean"/> @@ -342,22 +571,59 @@ by a particular renderpass/blit. <bitfield pos="1" name="CP_AHB_BUSY_CP_MASTER" type="boolean"/> <bitfield pos="0" name="CP_AHB_BUSY_CX_MASTER" type="boolean"/> </reg32> - <reg32 offset="0x0211" name="RBBM_STATUS1"/> - <reg32 offset="0x0212" name="RBBM_STATUS2"/> - <reg32 offset="0x0213" name="RBBM_STATUS3"> + <reg32 offset="0x0211" name="RBBM_STATUS1" variants="A6XX-A7XX"/> + <reg32 offset="0x0212" name="RBBM_STATUS2" variants="A6XX-A7XX"/> + <reg32 offset="0x0213" name="RBBM_STATUS3" variants="A6XX-A7XX"> <bitfield pos="24" name="SMMU_STALLED_ON_FAULT" type="boolean"/> </reg32> - <reg32 offset="0x0215" name="RBBM_VBIF_GX_RESET_STATUS"/> - <reg32 offset="0x0260" name="RBBM_CLOCK_MODE_CP" variants="A7XX-"/> - <reg32 offset="0x0284" name="RBBM_CLOCK_MODE_BV_LRZ" variants="A7XX-"/> - <reg32 offset="0x0285" name="RBBM_CLOCK_MODE_BV_GRAS" variants="A7XX-"/> - <reg32 offset="0x0286" name="RBBM_CLOCK_MODE2_GRAS" variants="A7XX-"/> - <reg32 offset="0x0287" name="RBBM_CLOCK_MODE_BV_VFD" variants="A7XX-"/> - <reg32 offset="0x0288" name="RBBM_CLOCK_MODE_BV_GPC" variants="A7XX-"/> + <reg32 offset="0x012" name="RBBM_STATUS" variants="A8XX-"> + <bitfield pos="23" name="GPU_BUSY_IGN_AHB" type="boolean"/> + <bitfield pos="22" name="GPU_BUSY_IGN_AHB_CP" type="boolean"/> + <bitfield pos="21" name="SLICE_BUSY_IGN_CP" type="boolean"/> + <bitfield pos="20" name="CP_SLICE_BUSY" type="boolean"/> + <bitfield pos="19" name="UNSLICE_BUSY_IGN_AHB" type="boolean"/> + <bitfield pos="18" name="UNSLICE_BUSY_IGN_AHB_CP" type="boolean"/> + <bitfield pos="17" name="CP_SLICE_RL_BUSY" type="boolean"/> + <bitfield pos="14" name="UNSLICE_TOP_BUSY" type="boolean"/> + <bitfield pos="13" name="UFC_BUSY" type="boolean"/> + <bitfield pos="12" name="HLSQ_BUSY" type="boolean"/> + <bitfield pos="11" name="VSC_BUSY" type="boolean"/> + <bitfield pos="10" name="UCHE_BUSY" type="boolean"/> + <bitfield pos="9" name="VPC_BUSY" type="boolean"/> + <bitfield pos="8" name="PC_BUSY" type="boolean"/> + <bitfield pos="7" name="CMP_BUSY" type="boolean"/> + <bitfield pos="6" name="DCMP_BUSY" type="boolean"/> + <bitfield pos="5" name="VBIF_GX_BUSY" type="boolean"/> + <bitfield pos="4" name="DBGC_PERF_BUSY" type="boolean"/> + <bitfield pos="3" name="GFX_DBGC_BUSY" type="boolean"/> + <bitfield pos="2" name="CP_BUSY" type="boolean"/> + <bitfield pos="1" name="CP_AHB_BUSY_CP_MASTER" type="boolean"/> + <bitfield pos="0" name="CP_AHB_BUSY_CX_MASTER" type="boolean"/> + </reg32> + <reg32 offset="0x013" name="RBBM_STATUS1" variants="A8XX-"/> + <reg32 
offset="0x015" name="RBBM_GFX_STATUS" variants="A8XX-"/> + <reg32 offset="0x016" name="RBBM_GFX_STATUS1" variants="A8XX-"/> + <reg32 offset="0x018" name="RBBM_LPAC_STATUS" variants="A8XX-"/> + <reg32 offset="0x01a" name="RBBM_GFX_BR_STATUS" variants="A8XX-"/> + <reg32 offset="0x01c" name="RBBM_GFX_BV_STATUS" variants="A8XX-"/> + <reg32 offset="0x01e" name="RBBM_MISC_STATUS" variants="A8XX-"> + <bitfield pos="0" name="SMMU_STALLED_ON_FAULT" type="boolean"/> + </reg32> + + <reg32 offset="0x0215" name="RBBM_VBIF_GX_RESET_STATUS" variants="A6XX"/> - <reg32 offset="0x02c0" name="RBBM_SW_FUSE_INT_STATUS" variants="A7XX-"/> - <reg32 offset="0x02c1" name="RBBM_SW_FUSE_INT_MASK" variants="A7XX-"/> + <reg32 offset="0x0260" name="RBBM_CLOCK_MODE_CP" variants="A7XX"/> + <reg32 offset="0x0284" name="RBBM_CLOCK_MODE_BV_LRZ" variants="A7XX"/> + <reg32 offset="0x0285" name="RBBM_CLOCK_MODE_BV_GRAS" variants="A7XX"/> + <reg32 offset="0x0286" name="RBBM_CLOCK_MODE2_GRAS" variants="A7XX"/> + <reg32 offset="0x0287" name="RBBM_CLOCK_MODE_BV_VFD" variants="A7XX"/> + <reg32 offset="0x0288" name="RBBM_CLOCK_MODE_BV_GPC" variants="A7XX"/> + + <reg32 offset="0x02c0" name="RBBM_SW_FUSE_INT_STATUS" variants="A7XX"/> + <reg32 offset="0x02c1" name="RBBM_SW_FUSE_INT_MASK" variants="A7XX"/> + <reg32 offset="0x0071" name="RBBM_SW_FUSE_INT_STATUS" variants="A8XX-"/> + <reg32 offset="0x0072" name="RBBM_SW_FUSE_INT_MASK" variants="A8XX-"/> <array offset="0x0400" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A6XX"/> <array offset="0x041c" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A6XX"/> @@ -376,49 +642,96 @@ by a particular renderpass/blit. <array offset="0x04ea" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A6XX"/> <array offset="0x04f2" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A6XX"/> - <array offset="0x0300" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A7XX-"/> - <array offset="0x031c" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A7XX-"/> - <array offset="0x0324" name="RBBM_PERFCTR_PC" stride="2" length="8" variants="A7XX-"/> - <array offset="0x0334" name="RBBM_PERFCTR_VFD" stride="2" length="8" variants="A7XX-"/> - <array offset="0x0344" name="RBBM_PERFCTR_HLSQ" stride="2" length="6" variants="A7XX-"/> - <array offset="0x0350" name="RBBM_PERFCTR_VPC" stride="2" length="6" variants="A7XX-"/> - <array offset="0x035c" name="RBBM_PERFCTR_CCU" stride="2" length="5" variants="A7XX-"/> - <array offset="0x0366" name="RBBM_PERFCTR_TSE" stride="2" length="4" variants="A7XX-"/> - <array offset="0x036e" name="RBBM_PERFCTR_RAS" stride="2" length="4" variants="A7XX-"/> - <array offset="0x0376" name="RBBM_PERFCTR_UCHE" stride="2" length="12" variants="A7XX-"/> - <array offset="0x038e" name="RBBM_PERFCTR_TP" stride="2" length="12" variants="A7XX-"/> - <array offset="0x03a6" name="RBBM_PERFCTR_SP" stride="2" length="24" variants="A7XX-"/> - <array offset="0x03d6" name="RBBM_PERFCTR_RB" stride="2" length="8" variants="A7XX-"/> - <array offset="0x03e6" name="RBBM_PERFCTR_VSC" stride="2" length="2" variants="A7XX-"/> - <array offset="0x03ea" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A7XX-"/> - <array offset="0x03f2" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A7XX-"/> - <array offset="0x03fa" name="RBBM_PERFCTR_UFC" stride="2" length="4" variants="A7XX-"/> - <array offset="0x0410" name="RBBM_PERFCTR2_HLSQ" stride="2" length="6" variants="A7XX-"/> - <array offset="0x041c" name="RBBM_PERFCTR2_CP" stride="2" length="7" variants="A7XX-"/> - <array 
offset="0x042a" name="RBBM_PERFCTR2_SP" stride="2" length="12" variants="A7XX-"/> - <array offset="0x0442" name="RBBM_PERFCTR2_TP" stride="2" length="6" variants="A7XX-"/> - <array offset="0x044e" name="RBBM_PERFCTR2_UFC" stride="2" length="2" variants="A7XX-"/> - <array offset="0x0460" name="RBBM_PERFCTR_BV_PC" stride="2" length="8" variants="A7XX-"/> - <array offset="0x0470" name="RBBM_PERFCTR_BV_VFD" stride="2" length="8" variants="A7XX-"/> - <array offset="0x0480" name="RBBM_PERFCTR_BV_VPC" stride="2" length="6" variants="A7XX-"/> - <array offset="0x048c" name="RBBM_PERFCTR_BV_TSE" stride="2" length="4" variants="A7XX-"/> - <array offset="0x0494" name="RBBM_PERFCTR_BV_RAS" stride="2" length="4" variants="A7XX-"/> - <array offset="0x049c" name="RBBM_PERFCTR_BV_LRZ" stride="2" length="4" variants="A7XX-"/> - - <reg32 offset="0x0500" name="RBBM_PERFCTR_CNTL"/> - <reg32 offset="0x0501" name="RBBM_PERFCTR_LOAD_CMD0"/> - <reg32 offset="0x0502" name="RBBM_PERFCTR_LOAD_CMD1"/> - <reg32 offset="0x0503" name="RBBM_PERFCTR_LOAD_CMD2"/> - <reg32 offset="0x0504" name="RBBM_PERFCTR_LOAD_CMD3"/> - <reg32 offset="0x0505" name="RBBM_PERFCTR_LOAD_VALUE_LO"/> - <reg32 offset="0x0506" name="RBBM_PERFCTR_LOAD_VALUE_HI"/> - <array offset="0x0507" name="RBBM_PERFCTR_RBBM_SEL" stride="1" length="4"/> - <reg32 offset="0x050B" name="RBBM_PERFCTR_GPU_BUSY_MASKED"/> - <reg32 offset="0x050e" name="RBBM_PERFCTR_SRAM_INIT_CMD"/> - <reg32 offset="0x050f" name="RBBM_PERFCTR_SRAM_INIT_STATUS"/> - <reg32 offset="0x0533" name="RBBM_ISDB_CNT"/> - <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL"/> - <reg32 offset="0x0535" name="RBBM_SNAPSHOT_STATUS" variants="A7XX-"/> + <array offset="0x0300" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A7XX"/> + <array offset="0x031c" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A7XX"/> + <array offset="0x0324" name="RBBM_PERFCTR_PC" stride="2" length="8" variants="A7XX"/> + <array offset="0x0334" name="RBBM_PERFCTR_VFD" stride="2" length="8" variants="A7XX"/> + <array offset="0x0344" name="RBBM_PERFCTR_HLSQ" stride="2" length="6" variants="A7XX"/> + <array offset="0x0350" name="RBBM_PERFCTR_VPC" stride="2" length="6" variants="A7XX"/> + <array offset="0x035c" name="RBBM_PERFCTR_CCU" stride="2" length="5" variants="A7XX"/> + <array offset="0x0366" name="RBBM_PERFCTR_TSE" stride="2" length="4" variants="A7XX"/> + <array offset="0x036e" name="RBBM_PERFCTR_RAS" stride="2" length="4" variants="A7XX"/> + <array offset="0x0376" name="RBBM_PERFCTR_UCHE" stride="2" length="12" variants="A7XX"/> + <array offset="0x038e" name="RBBM_PERFCTR_TP" stride="2" length="12" variants="A7XX"/> + <array offset="0x03a6" name="RBBM_PERFCTR_SP" stride="2" length="24" variants="A7XX"/> + <array offset="0x03d6" name="RBBM_PERFCTR_RB" stride="2" length="8" variants="A7XX"/> + <array offset="0x03e6" name="RBBM_PERFCTR_VSC" stride="2" length="2" variants="A7XX"/> + <array offset="0x03ea" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A7XX"/> + <array offset="0x03f2" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A7XX"/> + <array offset="0x03fa" name="RBBM_PERFCTR_UFC" stride="2" length="4" variants="A7XX"/> + <array offset="0x0410" name="RBBM_PERFCTR2_HLSQ" stride="2" length="6" variants="A7XX"/> + <array offset="0x041c" name="RBBM_PERFCTR2_CP" stride="2" length="7" variants="A7XX"/> + <array offset="0x042a" name="RBBM_PERFCTR2_SP" stride="2" length="12" variants="A7XX"/> + <array offset="0x0442" name="RBBM_PERFCTR2_TP" stride="2" length="6" variants="A7XX"/> + <array 
offset="0x044e" name="RBBM_PERFCTR2_UFC" stride="2" length="2" variants="A7XX"/> + <array offset="0x0460" name="RBBM_PERFCTR_BV_PC" stride="2" length="8" variants="A7XX"/> + <array offset="0x0470" name="RBBM_PERFCTR_BV_VFD" stride="2" length="8" variants="A7XX"/> + <array offset="0x0480" name="RBBM_PERFCTR_BV_VPC" stride="2" length="6" variants="A7XX"/> + <array offset="0x048c" name="RBBM_PERFCTR_BV_TSE" stride="2" length="4" variants="A7XX"/> + <array offset="0x0494" name="RBBM_PERFCTR_BV_RAS" stride="2" length="4" variants="A7XX"/> + <array offset="0x049c" name="RBBM_PERFCTR_BV_LRZ" stride="2" length="4" variants="A7XX"/> + + <array offset="0x01b0" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A8XX"/> + <array offset="0x01cc" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A8XX"/> + <array offset="0x01d4" name="RBBM_PERFCTR_PC" stride="2" length="8" variants="A8XX"/> + <array offset="0x01e4" name="RBBM_PERFCTR_VFD" stride="2" length="8" variants="A8XX"/> + <array offset="0x01f4" name="RBBM_PERFCTR_HLSQ" stride="2" length="6" variants="A8XX"/> + <array offset="0x0200" name="RBBM_PERFCTR_VPC" stride="2" length="6" variants="A8XX"/> + <array offset="0x020c" name="RBBM_PERFCTR_CCU" stride="2" length="5" variants="A8XX"/> + <array offset="0x0216" name="RBBM_PERFCTR_TSE" stride="2" length="4" variants="A8XX"/> + <array offset="0x021e" name="RBBM_PERFCTR_RAS" stride="2" length="4" variants="A8XX"/> + <array offset="0x0226" name="RBBM_PERFCTR_UCHE" stride="2" length="24" variants="A8XX"/> + <array offset="0x0256" name="RBBM_PERFCTR_TP" stride="2" length="12" variants="A8XX"/> + <array offset="0x026e" name="RBBM_PERFCTR_SP" stride="2" length="24" variants="A8XX"/> + <array offset="0x029e" name="RBBM_PERFCTR_RB" stride="2" length="8" variants="A8XX"/> + <array offset="0x02ae" name="RBBM_PERFCTR_VSC" stride="2" length="2" variants="A8XX"/> + <array offset="0x02b2" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A8XX"/> + <array offset="0x02ba" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A8XX"/> + <array offset="0x02c2" name="RBBM_PERFCTR_UFC" stride="2" length="4" variants="A8XX"/> + <array offset="0x02e2" name="RBBM_PERFCTR2_HLSQ" stride="2" length="6" variants="A8XX"/> + <array offset="0x02ee" name="RBBM_PERFCTR2_CP" stride="2" length="7" variants="A8XX"/> + <array offset="0x02fc" name="RBBM_PERFCTR2_SP" stride="2" length="12" variants="A8XX"/> + <array offset="0x0314" name="RBBM_PERFCTR2_TP" stride="2" length="8" variants="A8XX"/> + <array offset="0x0324" name="RBBM_PERFCTR2_UFC" stride="2" length="2" variants="A8XX"/> + <array offset="0x0328" name="RBBM_PERFCTR_BV_PC" stride="2" length="8" variants="A8XX"/> + <array offset="0x0338" name="RBBM_PERFCTR_BV_VFD" stride="2" length="8" variants="A8XX"/> + <array offset="0x0348" name="RBBM_PERFCTR_BV_VPC" stride="2" length="6" variants="A8XX"/> + <array offset="0x0354" name="RBBM_PERFCTR_BV_TSE" stride="2" length="4" variants="A8XX"/> + <array offset="0x035c" name="RBBM_PERFCTR_BV_RAS" stride="2" length="4" variants="A8XX"/> + <array offset="0x0364" name="RBBM_PERFCTR_BV_LRZ" stride="2" length="4" variants="A8XX"/> + <array offset="0x036c" name="RBBM_PERFCTR_BV_CCU" stride="2" length="3" variants="A8XX"/> + <array offset="0x0372" name="RBBM_PERFCTR_BV_RB" stride="2" length="6" variants="A8XX"/> + + <reg32 offset="0x0500" name="RBBM_PERFCTR_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x0460" name="RBBM_PERFCTR_CNTL" variants="A8XX-"/> + <reg32 offset="0x0501" name="RBBM_PERFCTR_LOAD_CMD0" variants="A6XX"/> + 
<reg32 offset="0x0502" name="RBBM_PERFCTR_LOAD_CMD1" variants="A6XX"/> + <reg32 offset="0x0503" name="RBBM_PERFCTR_LOAD_CMD2" variants="A6XX"/> + <reg32 offset="0x0504" name="RBBM_PERFCTR_LOAD_CMD3" variants="A6XX"/> + <reg64 offset="0x0505" name="RBBM_PERFCTR_LOAD_VALUE" variants="A6XX"/> + <array offset="0x0507" name="RBBM_PERFCTR_RBBM_SEL" stride="1" length="4" variants="A6XX-A7XX"/> + <array offset="0x0441" name="RBBM_PERFCTR_RBBM_SEL" stride="1" length="4" variants="A8XX-"/> + <reg32 offset="0x050B" name="RBBM_PERFCTR_GPU_BUSY_MASKED" variants="A6XX-A7XX"/> + <reg32 offset="0x019e" name="RBBM_PERFCTR_GPU_BUSY_MASKED" variants="A8XX-"/> + <reg32 offset="0x050e" name="RBBM_PERFCTR_SRAM_INIT_CMD" variants="A6XX-A7XX"/> + <reg32 offset="0x0449" name="RBBM_PERFCTR_SRAM_INIT_CMD" variants="A8XX-"/> + <reg32 offset="0x050f" name="RBBM_PERFCTR_SRAM_INIT_STATUS" variants="A6XX-A7XX"/> + <reg32 offset="0x019f" name="RBBM_PERFCTR_SRAM_INIT_STATUS" variants="A8XX-"/> + <reg32 offset="0x01a1" name="RBBM_PERFCTR_FLUSH_HOST_STATUS" variants="A8XX-"/> + <reg32 offset="0x044c" name="RBBM_PERFCTR_FLUSH_HOST_CMD" variants="A8XX-"/> + <reg32 offset="0x0533" name="RBBM_ISDB_CNT" variants="A6XX-A7XX"/> + <reg32 offset="0x002d" name="RBBM_ISDB_CNT" variants="A8XX-"/> + <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x0440" name="RBBM_NC_MODE_CNTL" variants="A8XX-"/> + <reg32 offset="0x0535" name="RBBM_SNAPSHOT_STATUS" variants="A7XX"/> + <reg32 offset="0x002e" name="RBBM_SNAPSHOT_STATUS" variants="A8XX-"/> + + <reg32 offset="0x500" name="RBBM_SLICE_PERFCTR_CNTL" variants="A8XX-"/> + <reg32 offset="0x58f" name="RBBM_SLICE_INTERFACE_HANG_INT_CNTL" variants="A8XX-"/> + <array offset="0x5e0" name="RBBM_SLICE_PERFCTR_RBBM_SEL" stride="1" length="4" variants="A8XX-"/> + <reg32 offset="0x5e8" name="RBBM_SLICE_PERFCTR_SRAM_INIT_CMD" variants="A8XX-"/> + <reg32 offset="0x5eb" name="RBBM_SLICE_PERFCTR_FLUSH_HOST_CMD" variants="A8XX-"/> + <reg32 offset="0x5ec" name="RBBM_SLICE_NC_MODE_CNTL" variants="A8XX-"/> <!--- This block of registers aren't tied to perf counters. They @@ -426,170 +739,211 @@ by a particular renderpass/blit. vertices in, number of primnitives assembled etc. 
--> - <reg64 offset="0x0540" name="RBBM_PIPESTAT_IAVERTICES"/> - <reg64 offset="0x0542" name="RBBM_PIPESTAT_IAPRIMITIVES"/> - <reg64 offset="0x0544" name="RBBM_PIPESTAT_VSINVOCATIONS"/> - <reg64 offset="0x0546" name="RBBM_PIPESTAT_HSINVOCATIONS"/> - <reg64 offset="0x0548" name="RBBM_PIPESTAT_DSINVOCATIONS"/> - <reg64 offset="0x054a" name="RBBM_PIPESTAT_GSINVOCATIONS"/> - <reg64 offset="0x054c" name="RBBM_PIPESTAT_GSPRIMITIVES"/> - <reg64 offset="0x054e" name="RBBM_PIPESTAT_CINVOCATIONS"/> - <reg64 offset="0x0550" name="RBBM_PIPESTAT_CPRIMITIVES"/> - <reg64 offset="0x0552" name="RBBM_PIPESTAT_PSINVOCATIONS"/> - <reg64 offset="0x0554" name="RBBM_PIPESTAT_CSINVOCATIONS"/> + <reg64 offset="0x0540" name="RBBM_PIPESTAT_IAVERTICES" variants="A6XX-A7XX"/> + <reg64 offset="0x0542" name="RBBM_PIPESTAT_IAPRIMITIVES" variants="A6XX-A7XX"/> + <reg64 offset="0x0544" name="RBBM_PIPESTAT_VSINVOCATIONS" variants="A6XX-A7XX"/> + <reg64 offset="0x0546" name="RBBM_PIPESTAT_HSINVOCATIONS" variants="A6XX-A7XX"/> + <reg64 offset="0x0548" name="RBBM_PIPESTAT_DSINVOCATIONS" variants="A6XX-A7XX"/> + <reg64 offset="0x054a" name="RBBM_PIPESTAT_GSINVOCATIONS" variants="A6XX-A7XX"/> + <reg64 offset="0x054c" name="RBBM_PIPESTAT_GSPRIMITIVES" variants="A6XX-A7XX"/> + <reg64 offset="0x054e" name="RBBM_PIPESTAT_CINVOCATIONS" variants="A6XX-A7XX"/> + <reg64 offset="0x0550" name="RBBM_PIPESTAT_CPRIMITIVES" variants="A6XX-A7XX"/> + <reg64 offset="0x0552" name="RBBM_PIPESTAT_PSINVOCATIONS" variants="A6XX-A7XX"/> + <reg64 offset="0x0554" name="RBBM_PIPESTAT_CSINVOCATIONS" variants="A6XX-A7XX"/> + + <reg64 offset="0x0380" name="RBBM_PIPESTAT_IAVERTICES" variants="A8XX-"/> + <reg64 offset="0x0382" name="RBBM_PIPESTAT_IAPRIMITIVES" variants="A8XX-"/> + <reg64 offset="0x0384" name="RBBM_PIPESTAT_VSINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x0386" name="RBBM_PIPESTAT_GSINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x0388" name="RBBM_PIPESTAT_GSPRIMITIVES" variants="A8XX-"/> + <reg64 offset="0x038a" name="RBBM_PIPESTAT_CINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x038c" name="RBBM_PIPESTAT_CPRIMITIVES" variants="A8XX-"/> + <reg64 offset="0x038e" name="RBBM_PIPESTAT_PSINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x0390" name="RBBM_PIPESTAT_HSINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x0392" name="RBBM_PIPESTAT_DSINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x0394" name="RBBM_PIPESTAT_CSINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x0396" name="RBBM_PIPESTAT_ASINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x0398" name="RBBM_PIPESTAT_MSINVOCATIONS" variants="A8XX-"/> + <reg64 offset="0x039a" name="RBBM_PIPESTAT_MSPRIMITIVES" variants="A8XX-"/> <reg32 offset="0xF400" name="RBBM_SECVID_TRUST_CNTL"/> <reg64 offset="0xF800" name="RBBM_SECVID_TSB_TRUSTED_BASE"/> <reg32 offset="0xF802" name="RBBM_SECVID_TSB_TRUSTED_SIZE"/> <reg32 offset="0xF803" name="RBBM_SECVID_TSB_CNTL"/> - <reg32 offset="0xF810" name="RBBM_SECVID_TSB_ADDR_MODE_CNTL" type="a5xx_address_mode"/> + <reg32 offset="0xF810" name="RBBM_SECVID_TSB_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/> <reg64 offset="0xfc00" name="RBBM_SECVID_TSB_STATUS" variants="A7XX-"/> - <reg32 offset="0x00010" name="RBBM_VBIF_CLIENT_QOS_CNTL"/> - <reg32 offset="0x00011" name="RBBM_GBIF_CLIENT_QOS_CNTL"/> - <reg32 offset="0x00016" name="RBBM_GBIF_HALT"/> - <reg32 offset="0x00017" name="RBBM_GBIF_HALT_ACK"/> - <reg32 offset="0x0001c" name="RBBM_WAIT_FOR_GPU_IDLE_CMD"> + <reg32 offset="0x00010" name="RBBM_VBIF_CLIENT_QOS_CNTL" variants="A6XX"/> + <reg32 
offset="0x00011" name="RBBM_GBIF_CLIENT_QOS_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x00008" name="RBBM_GBIF_CLIENT_QOS_CNTL" variants="A8XX-"/> + <reg32 offset="0x00016" name="RBBM_GBIF_HALT" variants="A6XX-A7XX"/> + <reg32 offset="0x0000a" name="RBBM_GBIF_HALT" variants="A8XX-"/> + <reg32 offset="0x00017" name="RBBM_GBIF_HALT_ACK" variants="A6XX-A7XX"/> + <reg32 offset="0x0000b" name="RBBM_GBIF_HALT_ACK" variants="A8XX-"/> + <reg32 offset="0x0001c" name="RBBM_WAIT_FOR_GPU_IDLE_CMD" variants="A6XX"> <bitfield pos="0" name="WAIT_GPU_IDLE" type="boolean"/> </reg32> - <reg32 offset="0x00016" name="RBBM_GBIF_HALT" variants="A7XX-"/> - <reg32 offset="0x00017" name="RBBM_GBIF_HALT_ACK" variants="A7XX-"/> - <reg32 offset="0x0001f" name="RBBM_INTERFACE_HANG_INT_CNTL"/> - <reg32 offset="0x00037" name="RBBM_INT_CLEAR_CMD" type="A6XX_RBBM_INT_0_MASK"/> - <reg32 offset="0x00038" name="RBBM_INT_0_MASK" type="A6XX_RBBM_INT_0_MASK"/> - <reg32 offset="0x0003a" name="RBBM_INT_2_MASK" variants="A7XX-"/> - <reg32 offset="0x00042" name="RBBM_SP_HYST_CNT"/> - <reg32 offset="0x00043" name="RBBM_SW_RESET_CMD"/> - <reg32 offset="0x00044" name="RBBM_RAC_THRESHOLD_CNT"/> - <reg32 offset="0x00045" name="RBBM_BLOCK_SW_RESET_CMD"/> - <reg32 offset="0x00046" name="RBBM_BLOCK_SW_RESET_CMD2"/> - <reg32 offset="0x000ad" name="RBBM_CLOCK_CNTL_GLOBAL" variants="A7XX-"/> - <reg32 offset="0x000ae" name="RBBM_CLOCK_CNTL"/> - <reg32 offset="0x000b0" name="RBBM_CLOCK_CNTL_SP0"/> - <reg32 offset="0x000b1" name="RBBM_CLOCK_CNTL_SP1"/> - <reg32 offset="0x000b2" name="RBBM_CLOCK_CNTL_SP2"/> - <reg32 offset="0x000b3" name="RBBM_CLOCK_CNTL_SP3"/> - <reg32 offset="0x000b4" name="RBBM_CLOCK_CNTL2_SP0"/> - <reg32 offset="0x000b5" name="RBBM_CLOCK_CNTL2_SP1"/> - <reg32 offset="0x000b6" name="RBBM_CLOCK_CNTL2_SP2"/> - <reg32 offset="0x000b7" name="RBBM_CLOCK_CNTL2_SP3"/> - <reg32 offset="0x000b8" name="RBBM_CLOCK_DELAY_SP0"/> - <reg32 offset="0x000b9" name="RBBM_CLOCK_DELAY_SP1"/> - <reg32 offset="0x000ba" name="RBBM_CLOCK_DELAY_SP2"/> - <reg32 offset="0x000bb" name="RBBM_CLOCK_DELAY_SP3"/> - <reg32 offset="0x000bc" name="RBBM_CLOCK_HYST_SP0"/> - <reg32 offset="0x000bd" name="RBBM_CLOCK_HYST_SP1"/> - <reg32 offset="0x000be" name="RBBM_CLOCK_HYST_SP2"/> - <reg32 offset="0x000bf" name="RBBM_CLOCK_HYST_SP3"/> - <reg32 offset="0x000c0" name="RBBM_CLOCK_CNTL_TP0"/> - <reg32 offset="0x000c1" name="RBBM_CLOCK_CNTL_TP1"/> - <reg32 offset="0x000c2" name="RBBM_CLOCK_CNTL_TP2"/> - <reg32 offset="0x000c3" name="RBBM_CLOCK_CNTL_TP3"/> - <reg32 offset="0x000c4" name="RBBM_CLOCK_CNTL2_TP0"/> - <reg32 offset="0x000c5" name="RBBM_CLOCK_CNTL2_TP1"/> - <reg32 offset="0x000c6" name="RBBM_CLOCK_CNTL2_TP2"/> - <reg32 offset="0x000c7" name="RBBM_CLOCK_CNTL2_TP3"/> - <reg32 offset="0x000c8" name="RBBM_CLOCK_CNTL3_TP0"/> - <reg32 offset="0x000c9" name="RBBM_CLOCK_CNTL3_TP1"/> - <reg32 offset="0x000ca" name="RBBM_CLOCK_CNTL3_TP2"/> - <reg32 offset="0x000cb" name="RBBM_CLOCK_CNTL3_TP3"/> - <reg32 offset="0x000cc" name="RBBM_CLOCK_CNTL4_TP0"/> - <reg32 offset="0x000cd" name="RBBM_CLOCK_CNTL4_TP1"/> - <reg32 offset="0x000ce" name="RBBM_CLOCK_CNTL4_TP2"/> - <reg32 offset="0x000cf" name="RBBM_CLOCK_CNTL4_TP3"/> - <reg32 offset="0x000d0" name="RBBM_CLOCK_DELAY_TP0"/> - <reg32 offset="0x000d1" name="RBBM_CLOCK_DELAY_TP1"/> - <reg32 offset="0x000d2" name="RBBM_CLOCK_DELAY_TP2"/> - <reg32 offset="0x000d3" name="RBBM_CLOCK_DELAY_TP3"/> - <reg32 offset="0x000d4" name="RBBM_CLOCK_DELAY2_TP0"/> - <reg32 offset="0x000d5" name="RBBM_CLOCK_DELAY2_TP1"/> - <reg32 
offset="0x000d6" name="RBBM_CLOCK_DELAY2_TP2"/> - <reg32 offset="0x000d7" name="RBBM_CLOCK_DELAY2_TP3"/> - <reg32 offset="0x000d8" name="RBBM_CLOCK_DELAY3_TP0"/> - <reg32 offset="0x000d9" name="RBBM_CLOCK_DELAY3_TP1"/> - <reg32 offset="0x000da" name="RBBM_CLOCK_DELAY3_TP2"/> - <reg32 offset="0x000db" name="RBBM_CLOCK_DELAY3_TP3"/> - <reg32 offset="0x000dc" name="RBBM_CLOCK_DELAY4_TP0"/> - <reg32 offset="0x000dd" name="RBBM_CLOCK_DELAY4_TP1"/> - <reg32 offset="0x000de" name="RBBM_CLOCK_DELAY4_TP2"/> - <reg32 offset="0x000df" name="RBBM_CLOCK_DELAY4_TP3"/> - <reg32 offset="0x000e0" name="RBBM_CLOCK_HYST_TP0"/> - <reg32 offset="0x000e1" name="RBBM_CLOCK_HYST_TP1"/> - <reg32 offset="0x000e2" name="RBBM_CLOCK_HYST_TP2"/> - <reg32 offset="0x000e3" name="RBBM_CLOCK_HYST_TP3"/> - <reg32 offset="0x000e4" name="RBBM_CLOCK_HYST2_TP0"/> - <reg32 offset="0x000e5" name="RBBM_CLOCK_HYST2_TP1"/> - <reg32 offset="0x000e6" name="RBBM_CLOCK_HYST2_TP2"/> - <reg32 offset="0x000e7" name="RBBM_CLOCK_HYST2_TP3"/> - <reg32 offset="0x000e8" name="RBBM_CLOCK_HYST3_TP0"/> - <reg32 offset="0x000e9" name="RBBM_CLOCK_HYST3_TP1"/> - <reg32 offset="0x000ea" name="RBBM_CLOCK_HYST3_TP2"/> - <reg32 offset="0x000eb" name="RBBM_CLOCK_HYST3_TP3"/> - <reg32 offset="0x000ec" name="RBBM_CLOCK_HYST4_TP0"/> - <reg32 offset="0x000ed" name="RBBM_CLOCK_HYST4_TP1"/> - <reg32 offset="0x000ee" name="RBBM_CLOCK_HYST4_TP2"/> - <reg32 offset="0x000ef" name="RBBM_CLOCK_HYST4_TP3"/> - <reg32 offset="0x000f0" name="RBBM_CLOCK_CNTL_RB0"/> - <reg32 offset="0x000f1" name="RBBM_CLOCK_CNTL_RB1"/> - <reg32 offset="0x000f2" name="RBBM_CLOCK_CNTL_RB2"/> - <reg32 offset="0x000f3" name="RBBM_CLOCK_CNTL_RB3"/> - <reg32 offset="0x000f4" name="RBBM_CLOCK_CNTL2_RB0"/> - <reg32 offset="0x000f5" name="RBBM_CLOCK_CNTL2_RB1"/> - <reg32 offset="0x000f6" name="RBBM_CLOCK_CNTL2_RB2"/> - <reg32 offset="0x000f7" name="RBBM_CLOCK_CNTL2_RB3"/> - <reg32 offset="0x000f8" name="RBBM_CLOCK_CNTL_CCU0"/> - <reg32 offset="0x000f9" name="RBBM_CLOCK_CNTL_CCU1"/> - <reg32 offset="0x000fa" name="RBBM_CLOCK_CNTL_CCU2"/> - <reg32 offset="0x000fb" name="RBBM_CLOCK_CNTL_CCU3"/> - <reg32 offset="0x00100" name="RBBM_CLOCK_HYST_RB_CCU0"/> - <reg32 offset="0x00101" name="RBBM_CLOCK_HYST_RB_CCU1"/> - <reg32 offset="0x00102" name="RBBM_CLOCK_HYST_RB_CCU2"/> - <reg32 offset="0x00103" name="RBBM_CLOCK_HYST_RB_CCU3"/> - <reg32 offset="0x00104" name="RBBM_CLOCK_CNTL_RAC"/> - <reg32 offset="0x00105" name="RBBM_CLOCK_CNTL2_RAC"/> - <reg32 offset="0x00106" name="RBBM_CLOCK_DELAY_RAC"/> - <reg32 offset="0x00107" name="RBBM_CLOCK_HYST_RAC"/> - <reg32 offset="0x00108" name="RBBM_CLOCK_CNTL_TSE_RAS_RBBM"/> - <reg32 offset="0x00109" name="RBBM_CLOCK_DELAY_TSE_RAS_RBBM"/> - <reg32 offset="0x0010a" name="RBBM_CLOCK_HYST_TSE_RAS_RBBM"/> - <reg32 offset="0x0010b" name="RBBM_CLOCK_CNTL_UCHE"/> - <reg32 offset="0x0010c" name="RBBM_CLOCK_CNTL2_UCHE"/> - <reg32 offset="0x0010d" name="RBBM_CLOCK_CNTL3_UCHE"/> - <reg32 offset="0x0010e" name="RBBM_CLOCK_CNTL4_UCHE"/> - <reg32 offset="0x0010f" name="RBBM_CLOCK_DELAY_UCHE"/> - <reg32 offset="0x00110" name="RBBM_CLOCK_HYST_UCHE"/> - <reg32 offset="0x00111" name="RBBM_CLOCK_MODE_VFD"/> - <reg32 offset="0x00112" name="RBBM_CLOCK_DELAY_VFD"/> - <reg32 offset="0x00113" name="RBBM_CLOCK_HYST_VFD"/> - <reg32 offset="0x00114" name="RBBM_CLOCK_MODE_GPC"/> - <reg32 offset="0x00115" name="RBBM_CLOCK_DELAY_GPC"/> - <reg32 offset="0x00116" name="RBBM_CLOCK_HYST_GPC"/> - <reg32 offset="0x00117" name="RBBM_CLOCK_DELAY_HLSQ_2"/> - <reg32 offset="0x00118" 
name="RBBM_CLOCK_CNTL_GMU_GX"/> - <reg32 offset="0x00119" name="RBBM_CLOCK_DELAY_GMU_GX"/> - <reg32 offset="0x0011a" name="RBBM_CLOCK_HYST_GMU_GX"/> - <reg32 offset="0x0011b" name="RBBM_CLOCK_MODE_HLSQ"/> - <reg32 offset="0x0011c" name="RBBM_CLOCK_DELAY_HLSQ"/> - <reg32 offset="0x0011d" name="RBBM_CLOCK_HYST_HLSQ"/> - <reg32 offset="0x0011e" name="RBBM_CGC_GLOBAL_LOAD_CMD" variants="A7XX-"/> - <reg32 offset="0x0011f" name="RBBM_CGC_P2S_TRIG_CMD" variants="A7XX-"/> - <reg32 offset="0x00120" name="RBBM_CLOCK_CNTL_TEX_FCHE"/> - <reg32 offset="0x00121" name="RBBM_CLOCK_DELAY_TEX_FCHE"/> + <reg32 offset="0x01a" name="RBBM_WAIT_IDLE_CLOCKS_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x01b" name="RBBM_WAIT_IDLE_CLOCKS_CNTL2" variants="A6XX-A7XX"/> + <reg32 offset="0x010" name="RBBM_WAIT_IDLE_CLOCKS_CNTL" variants="A8XX-"/> + <reg32 offset="0x011" name="RBBM_WAIT_IDLE_CLOCKS_CNTL2" variants="A8XX-"/> + + <reg32 offset="0x0001f" name="RBBM_INTERFACE_HANG_INT_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x0002f" name="RBBM_INTERFACE_HANG_INT_CNTL" variants="A8XX-"/> + <reg32 offset="0x00037" name="RBBM_INT_CLEAR_CMD" type="A6XX_RBBM_INT_0_MASK" variants="A6XX-A7XX"/> + <reg32 offset="0x00061" name="RBBM_INT_CLEAR_CMD" type="A6XX_RBBM_INT_0_MASK" variants="A8XX-"/> + <reg32 offset="0x00038" name="RBBM_INT_0_MASK" type="A6XX_RBBM_INT_0_MASK" variants="A6XX-A7XX"/> + <reg32 offset="0x00062" name="RBBM_INT_0_MASK" type="A6XX_RBBM_INT_0_MASK" variants="A8XX-"/> + <reg32 offset="0x0003a" name="RBBM_INT_2_MASK" variants="A7XX"/> + <reg32 offset="0x00064" name="RBBM_INT_2_MASK" variants="A8XX-"/> + <reg32 offset="0x00042" name="RBBM_SP_HYST_CNT" variants="A6XX-A7XX"/> + <reg32 offset="0x00043" name="RBBM_SW_RESET_CMD" variants="A6XX-A7XX"/> + <reg32 offset="0x00073" name="RBBM_SW_RESET_CMD" variants="A8XX-"/> + <reg32 offset="0x00044" name="RBBM_RAC_THRESHOLD_CNT" variants="A6XX-A7XX"/> + <reg32 offset="0x00029" name="RBBM_RAC_THRESHOLD_CNT" variants="A8XX-"/> + <reg32 offset="0x00045" name="RBBM_BLOCK_SW_RESET_CMD" variants="A6XX"/> + <reg32 offset="0x00046" name="RBBM_BLOCK_SW_RESET_CMD2" variants="A6XX"/> + <reg32 offset="0x000ad" name="RBBM_CLOCK_CNTL_GLOBAL" variants="A7XX"/> + <reg32 offset="0x0009a" name="RBBM_CLOCK_CNTL_GLOBAL" variants="A8XX-"/> + <reg32 offset="0x07d" name="RBBM_POWER_UP_RESET_SW_OVERRIDE" variants="A8XX-"/> + <reg32 offset="0x07e" name="RBBM_POWER_UP_RESET_SW_BV_OVERRIDE" variants="A8XX-"/> + <reg32 offset="0x000ae" name="RBBM_CLOCK_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x000b0" name="RBBM_CLOCK_CNTL_SP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000b1" name="RBBM_CLOCK_CNTL_SP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000b2" name="RBBM_CLOCK_CNTL_SP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000b3" name="RBBM_CLOCK_CNTL_SP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000b4" name="RBBM_CLOCK_CNTL2_SP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000b5" name="RBBM_CLOCK_CNTL2_SP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000b6" name="RBBM_CLOCK_CNTL2_SP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000b7" name="RBBM_CLOCK_CNTL2_SP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000b8" name="RBBM_CLOCK_DELAY_SP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000b9" name="RBBM_CLOCK_DELAY_SP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000ba" name="RBBM_CLOCK_DELAY_SP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000bb" name="RBBM_CLOCK_DELAY_SP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000bc" name="RBBM_CLOCK_HYST_SP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000bd" 
name="RBBM_CLOCK_HYST_SP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000be" name="RBBM_CLOCK_HYST_SP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000bf" name="RBBM_CLOCK_HYST_SP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000c0" name="RBBM_CLOCK_CNTL_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000c1" name="RBBM_CLOCK_CNTL_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000c2" name="RBBM_CLOCK_CNTL_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000c3" name="RBBM_CLOCK_CNTL_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000c4" name="RBBM_CLOCK_CNTL2_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000c5" name="RBBM_CLOCK_CNTL2_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000c6" name="RBBM_CLOCK_CNTL2_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000c7" name="RBBM_CLOCK_CNTL2_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000c8" name="RBBM_CLOCK_CNTL3_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000c9" name="RBBM_CLOCK_CNTL3_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000ca" name="RBBM_CLOCK_CNTL3_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000cb" name="RBBM_CLOCK_CNTL3_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000cc" name="RBBM_CLOCK_CNTL4_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000cd" name="RBBM_CLOCK_CNTL4_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000ce" name="RBBM_CLOCK_CNTL4_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000cf" name="RBBM_CLOCK_CNTL4_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000d0" name="RBBM_CLOCK_DELAY_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000d1" name="RBBM_CLOCK_DELAY_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000d2" name="RBBM_CLOCK_DELAY_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000d3" name="RBBM_CLOCK_DELAY_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000d4" name="RBBM_CLOCK_DELAY2_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000d5" name="RBBM_CLOCK_DELAY2_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000d6" name="RBBM_CLOCK_DELAY2_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000d7" name="RBBM_CLOCK_DELAY2_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000d8" name="RBBM_CLOCK_DELAY3_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000d9" name="RBBM_CLOCK_DELAY3_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000da" name="RBBM_CLOCK_DELAY3_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000db" name="RBBM_CLOCK_DELAY3_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000dc" name="RBBM_CLOCK_DELAY4_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000dd" name="RBBM_CLOCK_DELAY4_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000de" name="RBBM_CLOCK_DELAY4_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000df" name="RBBM_CLOCK_DELAY4_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000e0" name="RBBM_CLOCK_HYST_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000e1" name="RBBM_CLOCK_HYST_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000e2" name="RBBM_CLOCK_HYST_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000e3" name="RBBM_CLOCK_HYST_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000e4" name="RBBM_CLOCK_HYST2_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000e5" name="RBBM_CLOCK_HYST2_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000e6" name="RBBM_CLOCK_HYST2_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000e7" name="RBBM_CLOCK_HYST2_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000e8" name="RBBM_CLOCK_HYST3_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000e9" name="RBBM_CLOCK_HYST3_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000ea" name="RBBM_CLOCK_HYST3_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000eb" 
name="RBBM_CLOCK_HYST3_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000ec" name="RBBM_CLOCK_HYST4_TP0" variants="A6XX-A7XX"/> + <reg32 offset="0x000ed" name="RBBM_CLOCK_HYST4_TP1" variants="A6XX-A7XX"/> + <reg32 offset="0x000ee" name="RBBM_CLOCK_HYST4_TP2" variants="A6XX-A7XX"/> + <reg32 offset="0x000ef" name="RBBM_CLOCK_HYST4_TP3" variants="A6XX-A7XX"/> + <reg32 offset="0x000f0" name="RBBM_CLOCK_CNTL_RB0" variants="A6XX-A7XX"/> + <reg32 offset="0x000f1" name="RBBM_CLOCK_CNTL_RB1" variants="A6XX-A7XX"/> + <reg32 offset="0x000f2" name="RBBM_CLOCK_CNTL_RB2" variants="A6XX-A7XX"/> + <reg32 offset="0x000f3" name="RBBM_CLOCK_CNTL_RB3" variants="A6XX-A7XX"/> + <reg32 offset="0x000f4" name="RBBM_CLOCK_CNTL2_RB0" variants="A6XX-A7XX"/> + <reg32 offset="0x000f5" name="RBBM_CLOCK_CNTL2_RB1" variants="A6XX-A7XX"/> + <reg32 offset="0x000f6" name="RBBM_CLOCK_CNTL2_RB2" variants="A6XX-A7XX"/> + <reg32 offset="0x000f7" name="RBBM_CLOCK_CNTL2_RB3" variants="A6XX-A7XX"/> + <reg32 offset="0x000f8" name="RBBM_CLOCK_CNTL_CCU0" variants="A6XX-A7XX"/> + <reg32 offset="0x000f9" name="RBBM_CLOCK_CNTL_CCU1" variants="A6XX-A7XX"/> + <reg32 offset="0x000fa" name="RBBM_CLOCK_CNTL_CCU2" variants="A6XX-A7XX"/> + <reg32 offset="0x000fb" name="RBBM_CLOCK_CNTL_CCU3" variants="A6XX-A7XX"/> + <reg32 offset="0x00100" name="RBBM_CLOCK_HYST_RB_CCU0" variants="A6XX-A7XX"/> + <reg32 offset="0x00101" name="RBBM_CLOCK_HYST_RB_CCU1" variants="A6XX-A7XX"/> + <reg32 offset="0x00102" name="RBBM_CLOCK_HYST_RB_CCU2" variants="A6XX-A7XX"/> + <reg32 offset="0x00103" name="RBBM_CLOCK_HYST_RB_CCU3" variants="A6XX-A7XX"/> + <reg32 offset="0x00104" name="RBBM_CLOCK_CNTL_RAC" variants="A6XX-A7XX"/> + <reg32 offset="0x00105" name="RBBM_CLOCK_CNTL2_RAC" variants="A6XX-A7XX"/> + <reg32 offset="0x00106" name="RBBM_CLOCK_DELAY_RAC" variants="A6XX-A7XX"/> + <reg32 offset="0x00107" name="RBBM_CLOCK_HYST_RAC" variants="A6XX-A7XX"/> + <reg32 offset="0x00108" name="RBBM_CLOCK_CNTL_TSE_RAS_RBBM" variants="A6XX-A7XX"/> + <reg32 offset="0x00109" name="RBBM_CLOCK_DELAY_TSE_RAS_RBBM" variants="A6XX-A7XX"/> + <reg32 offset="0x0010a" name="RBBM_CLOCK_HYST_TSE_RAS_RBBM" variants="A6XX-A7XX"/> + <reg32 offset="0x0010b" name="RBBM_CLOCK_CNTL_UCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x0010c" name="RBBM_CLOCK_CNTL2_UCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x0010d" name="RBBM_CLOCK_CNTL3_UCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x0010e" name="RBBM_CLOCK_CNTL4_UCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x0010f" name="RBBM_CLOCK_DELAY_UCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x00110" name="RBBM_CLOCK_HYST_UCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x00111" name="RBBM_CLOCK_MODE_VFD" variants="A6XX-A7XX"/> + <reg32 offset="0x00112" name="RBBM_CLOCK_DELAY_VFD" variants="A6XX-A7XX"/> + <reg32 offset="0x00113" name="RBBM_CLOCK_HYST_VFD" variants="A6XX-A7XX"/> + <reg32 offset="0x00114" name="RBBM_CLOCK_MODE_GPC" variants="A6XX-A7XX"/> + <reg32 offset="0x00115" name="RBBM_CLOCK_DELAY_GPC" variants="A6XX-A7XX"/> + <reg32 offset="0x00116" name="RBBM_CLOCK_HYST_GPC" variants="A6XX-A7XX"/> + <reg32 offset="0x00117" name="RBBM_CLOCK_DELAY_HLSQ_2" variants="A6XX-A7XX"/> + <reg32 offset="0x00118" name="RBBM_CLOCK_CNTL_GMU_GX" variants="A6XX-A7XX"/> + <reg32 offset="0x00119" name="RBBM_CLOCK_DELAY_GMU_GX" variants="A6XX-A7XX"/> + <reg32 offset="0x0011a" name="RBBM_CLOCK_HYST_GMU_GX" variants="A6XX-A7XX"/> + <reg32 offset="0x0011b" name="RBBM_CLOCK_MODE_HLSQ" variants="A6XX-A7XX"/> + <reg32 offset="0x0011c" name="RBBM_CLOCK_DELAY_HLSQ" 
variants="A6XX-A7XX"/> + <reg32 offset="0x0011d" name="RBBM_CLOCK_HYST_HLSQ" variants="A6XX-A7XX"/> + <reg32 offset="0x0011e" name="RBBM_CGC_GLOBAL_LOAD_CMD" variants="A7XX"/> + <reg32 offset="0x0009b" name="RBBM_CGC_GLOBAL_LOAD_CMD" variants="A8XX-"/> + <reg32 offset="0x0011f" name="RBBM_CGC_P2S_TRIG_CMD" variants="A7XX"/> + <reg32 offset="0x0009c" name="RBBM_CGC_P2S_TRIG_CMD" variants="A8XX-"/> + <reg32 offset="0x00120" name="RBBM_CGC_P2S_CNTL" variants="A7XX"/> + <reg32 offset="0x0009d" name="RBBM_CGC_P2S_CNTL" variants="A8XX-"/> + <reg32 offset="0x00120" name="RBBM_CLOCK_CNTL_TEX_FCHE" variants="A6XX"/> + <reg32 offset="0x00121" name="RBBM_CLOCK_DELAY_TEX_FCHE" variants="A6XX-A7XX"/> <reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE" variants="A6XX"/> - <reg32 offset="0x00122" name="RBBM_CGC_P2S_STATUS" variants="A7XX-"> + <reg32 offset="0x00122" name="RBBM_CGC_P2S_STATUS" variants="A7XX"> <bitfield name="TXDONE" pos="0" type="boolean"/> </reg32> - <reg32 offset="0x00123" name="RBBM_CLOCK_CNTL_FCHE"/> - <reg32 offset="0x00124" name="RBBM_CLOCK_DELAY_FCHE"/> - <reg32 offset="0x00125" name="RBBM_CLOCK_HYST_FCHE"/> - <reg32 offset="0x00126" name="RBBM_CLOCK_CNTL_MHUB"/> - <reg32 offset="0x00127" name="RBBM_CLOCK_DELAY_MHUB"/> - <reg32 offset="0x00128" name="RBBM_CLOCK_HYST_MHUB"/> - <reg32 offset="0x00129" name="RBBM_CLOCK_DELAY_GLC"/> - <reg32 offset="0x0012a" name="RBBM_CLOCK_HYST_GLC"/> - <reg32 offset="0x0012b" name="RBBM_CLOCK_CNTL_GLC"/> - <reg32 offset="0x0012f" name="RBBM_CLOCK_HYST2_VFD" variants="A7XX-"/> - <reg32 offset="0x005ff" name="RBBM_LPAC_GBIF_CLIENT_QOS_CNTL"/> + <reg32 offset="0x09f" name="RBBM_CGC_P2S_STATUS" variants="A8XX-"> + <bitfield name="TXDONE" pos="0" type="boolean"/> + </reg32> + <reg32 offset="0x00123" name="RBBM_CLOCK_CNTL_FCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x00124" name="RBBM_CLOCK_DELAY_FCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x00125" name="RBBM_CLOCK_HYST_FCHE" variants="A6XX-A7XX"/> + <reg32 offset="0x00126" name="RBBM_CLOCK_CNTL_MHUB" variants="A6XX-A7XX"/> + <reg32 offset="0x00127" name="RBBM_CLOCK_DELAY_MHUB" variants="A6XX-A7XX"/> + <reg32 offset="0x00128" name="RBBM_CLOCK_HYST_MHUB" variants="A6XX-A7XX"/> + <reg32 offset="0x00129" name="RBBM_CLOCK_DELAY_GLC" variants="A6XX-A7XX"/> + <reg32 offset="0x0012a" name="RBBM_CLOCK_HYST_GLC" variants="A6XX-A7XX"/> + <reg32 offset="0x0012b" name="RBBM_CLOCK_CNTL_GLC" variants="A6XX-A7XX"/> + <reg32 offset="0x0012f" name="RBBM_CLOCK_HYST2_VFD" variants="A7XX"/> + <reg32 offset="0x00195" name="RBBM_CGC_0_PC" variants="A7XX"/> + <reg32 offset="0x0010b" name="RBBM_CGC_0_PC" variants="A8XX-"/> + + <reg32 offset="0x005ff" name="RBBM_LPAC_GBIF_CLIENT_QOS_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x00009" name="RBBM_LPAC_GBIF_CLIENT_QOS_CNTL" variants="A8XX-"/> <reg32 offset="0x0600" name="DBGC_CFG_DBGBUS_SEL_A"/> <reg32 offset="0x0601" name="DBGC_CFG_DBGBUS_SEL_B"/> @@ -610,6 +964,8 @@ by a particular renderpass/blit. <reg32 offset="0x0605" name="DBGC_CFG_DBGBUS_CNTLM"> <bitfield high="27" low="24" name="ENABLE"/> </reg32> + <reg32 offset="0x0606" name="DBGC_CFG_DBGBUS_OPL"/> + <reg32 offset="0x0607" name="DBGC_CFG_DBGBUS_OPE"/> <reg32 offset="0x0608" name="DBGC_CFG_DBGBUS_IVTL_0"/> <reg32 offset="0x0609" name="DBGC_CFG_DBGBUS_IVTL_1"/> <reg32 offset="0x060a" name="DBGC_CFG_DBGBUS_IVTL_2"/> @@ -638,72 +994,276 @@ by a particular renderpass/blit. 
<bitfield high="27" low="24" name="BYTEL14"/> <bitfield high="31" low="28" name="BYTEL15"/> </reg32> + <reg32 offset="0x0612" name="DBGC_CFG_DBGBUS_IVTE_0"/> + <reg32 offset="0x0613" name="DBGC_CFG_DBGBUS_IVTE_1"/> + <reg32 offset="0x0614" name="DBGC_CFG_DBGBUS_IVTE_2"/> + <reg32 offset="0x0615" name="DBGC_CFG_DBGBUS_IVTE_3"/> + <reg32 offset="0x0616" name="DBGC_CFG_DBGBUS_MASKE_0"/> + <reg32 offset="0x0617" name="DBGC_CFG_DBGBUS_MASKE_1"/> + <reg32 offset="0x0618" name="DBGC_CFG_DBGBUS_MASKE_2"/> + <reg32 offset="0x0619" name="DBGC_CFG_DBGBUS_MASKE_3"/> + <reg32 offset="0x061a" name="DBGC_CFG_DBGBUS_NIBBLEE"/> + <reg32 offset="0x061b" name="DBGC_CFG_DBGBUS_PTRC0"/> + <reg32 offset="0x061c" name="DBGC_CFG_DBGBUS_PTRC1"/> + <reg32 offset="0x061d" name="DBGC_CFG_DBGBUS_LOADREG"/> + <reg32 offset="0x061e" name="DBGC_CFG_DBGBUS_IDX"/> + <reg32 offset="0x061f" name="DBGC_CFG_DBGBUS_CLRC"/> + <reg32 offset="0x0620" name="DBGC_CFG_DBGBUS_LOADIVT"/> + <reg32 offset="0x0621" name="DBGC_VBIF_DBG_CNTL"/> + <reg32 offset="0x0622" name="DBGC_DBG_LO_HI_GPIO"/> + <reg32 offset="0x0623" name="DBGC_EXT_TRACE_BUS_CNTL"/> + <reg32 offset="0x0624" name="DBGC_READ_AHB_THROUGH_DBG"/> + <reg32 offset="0x0625" name="DBGC_CFG_DBGBUS_EVENT_LOGIC"/> + <reg32 offset="0x0626" name="DBGC_CFG_DBGBUS_OVER"/> + <reg32 offset="0x0627" name="DBGC_CFG_DBGBUS_COUNT0"/> + <reg32 offset="0x0628" name="DBGC_CFG_DBGBUS_COUNT1"/> + <reg32 offset="0x0629" name="DBGC_CFG_DBGBUS_COUNT2"/> + <reg32 offset="0x062a" name="DBGC_CFG_DBGBUS_COUNT3"/> + <reg32 offset="0x062b" name="DBGC_CFG_DBGBUS_COUNT4"/> + <reg32 offset="0x062c" name="DBGC_CFG_DBGBUS_COUNT5"/> + <reg32 offset="0x062d" name="DBGC_CFG_DBGBUS_TRACE_ADDR"/> + <reg32 offset="0x062e" name="DBGC_CFG_DBGBUS_TRACE_BUF0"/> <reg32 offset="0x062f" name="DBGC_CFG_DBGBUS_TRACE_BUF1"/> <reg32 offset="0x0630" name="DBGC_CFG_DBGBUS_TRACE_BUF2"/> - <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2" variants="A6XX"/> - <reg32 offset="0x0CD8" name="VSC_UNKNOWN_0CD8" variants="A7XX"> - <doc> - Set to true when binning, isn't changed afterwards - </doc> - <bitfield name="BINNING" pos="0" type="boolean"/> - </reg32> + <reg32 offset="0x0631" name="DBGC_CFG_DBGBUS_TRACE_BUF3"/> + <reg32 offset="0x0632" name="DBGC_CFG_DBGBUS_TRACE_BUF4"/> + <reg32 offset="0x0633" name="DBGC_CFG_DBGBUS_MISR0"/> + <reg32 offset="0x0634" name="DBGC_CFG_DBGBUS_MISR1"/> + <reg32 offset="0x0635" name="DBGC_EVT_CFG"/> + <reg32 offset="0x0636" name="DBGC_EVT_INTF_SEL_0"/> + <reg32 offset="0x0637" name="DBGC_EVT_INTF_SEL_1"/> + <reg32 offset="0x0638" name="DBGC_EVT_SLICE_CFG"/> + <reg32 offset="0x0639" name="DBGC_QDSS_TIMESTAMP_0"/> + <reg32 offset="0x063a" name="DBGC_QDSS_TIMESTAMP_1"/> + <reg32 offset="0x063b" name="DBGC_ECO_CNTL"/> + <reg32 offset="0x063c" name="DBGC_AHB_DBG_CNTL"/> + <reg32 offset="0x063d" name="DBGC_EVT_INTF_SEL_2"/> + <reg32 offset="0x0640" name="DBGC_CFG_DBGBUS_PONG_SEL_A"/> + <reg32 offset="0x0641" name="DBGC_CFG_DBGBUS_PONG_SEL_B"/> + <reg32 offset="0x0642" name="DBGC_CFG_DBGBUS_PONG_SEL_C"/> + <reg32 offset="0x0643" name="DBGC_CFG_DBGBUS_PONG_SEL_D"/> + <reg32 offset="0x0644" name="DBGC_CFG_DBGBUS_MISC_MODE"/> + <reg32 offset="0x0650" name="DBGC_EVT_INTF_SEL_3_0"/> + <reg32 offset="0x0651" name="DBGC_EVT_INTF_SEL_3_1"/> + <reg32 offset="0x0652" name="DBGC_EVT_INTF_SEL_3_2"/> + <reg32 offset="0x0653" name="DBGC_EVT_INTF_SEL_3_3"/> + <reg32 offset="0x0654" name="DBGC_EVT_INTF_SEL_3_4"/> + <reg32 offset="0x0655" name="DBGC_EVT_INTF_SEL_3_5"/> + <reg32 offset="0x0660" 
name="DBGC_TRACE_BUFFER_STATUS"/> + <reg32 offset="0x0661" name="DBGC_TRACE_BUFFER_CMD"/> + <reg32 offset="0x0662" name="DBGC_DBG_TRACE_BUFFER_RD_ADDR"/> + <reg32 offset="0x0663" name="DBGC_DBG_TRACE_BUFFER_RD_DATA"/> + <reg32 offset="0x0664" name="DBGC_TRACE_BUFFER_ATB_RD_STATUS"/> + <reg32 offset="0x0665" name="DBGC_SMMU_FAULT_BLOCK_HALT_CFG"/> + <reg32 offset="0x0666" name="DBGC_DBG_LOPC_SB_RD_ADDR"/> + <reg32 offset="0x0667" name="DBGC_DBG_LOPC_SB_RD_DATA"/> + <reg32 offset="0x0668" name="DBGC_DBG_LOPC_SB_WR_ADDR"/> + <reg32 offset="0x0669" name="DBGC_DBG_LOPC_SB_WR_DATA"/> + <reg32 offset="0x066a" name="DBGC_INTERRUPT_STATUS"/> + <reg64 offset="0x0680" name="DBGC_GBIF_DBG_BASE"/> + <reg32 offset="0x0682" name="DBGC_GBIF_DBG_BUFF_SIZE"/> + <reg32 offset="0x0683" name="DBGC_GBIF_DBG_CNTL"/> + <reg32 offset="0x0684" name="DBGC_GBIF_DBG_CMD"/> + <reg32 offset="0x0685" name="DBGC_GBIF_DBG_STATUS"/> + + <reg32 offset="0x0700" name="DBGC_SCOPE_PERF_COUNTER_CFG_US" variants="A8XX-"/> + <reg32 offset="0x0701" name="DBGC_CFG_PERF_TRIG_CLUSTER_FE_US" variants="A8XX-"/> + <reg32 offset="0x0702" name="DBGC_CFG_PERF_TRIG_CLUSTER_VPC_US" variants="A8XX-"/> + <reg32 offset="0x0703" name="DBGC_CFG_PERF_TRIG_CLUSTER_SP_VS_US" variants="A8XX-"/> + <reg32 offset="0x0704" name="DBGC_CFG_PERF_TRIG_CLUSTER_SP_PS_US" variants="A8XX-"/> + <reg32 offset="0x0707" name="DBGC_CFG_PERF_TRIG_CLUSTER_NONE_US" variants="A8XX-"/> + <reg32 offset="0x0708" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_FE_US" variants="A8XX-"/> + <reg32 offset="0x0709" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_VPC_US" variants="A8XX-"/> + <reg32 offset="0x070a" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_SP_VS_US" variants="A8XX-"/> + <reg32 offset="0x070f" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_NONE_US" variants="A8XX-"/> + <reg32 offset="0x0710" name="DBGC_CFG_PERF_COUNTER_SEL_FE_US" variants="A8XX-"/> + <reg32 offset="0x0711" name="DBGC_CFG_PERF_COUNTER_SEL_FE_US_1" variants="A8XX-"/> + <reg32 offset="0x0712" name="DBGC_CFG_PERF_COUNTER_SEL_FE_US_2" variants="A8XX-"/> + <reg32 offset="0x0713" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_US" variants="A8XX-"/> + <reg32 offset="0x0714" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_US_1" variants="A8XX-"/> + <reg32 offset="0x0715" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS_US" variants="A8XX-"/> + <reg32 offset="0x0716" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS_US" variants="A8XX-"/> + <reg32 offset="0x0720" name="DBGC_CFG_PERF_COUNTER_SEL_NONE_US" variants="A8XX-"/> + <reg32 offset="0x0721" name="DBGC_CFG_PERF_COUNTER_SEL_NONE_US_1" variants="A8XX-"/> + <reg32 offset="0x0722" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_US" variants="A8XX-"/> + <reg32 offset="0x0723" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_US_1" variants="A8XX-"/> + <reg32 offset="0x0724" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_US_2" variants="A8XX-"/> + <reg32 offset="0x0730" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_US" variants="A8XX-"/> + <reg32 offset="0x0731" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_US_1" variants="A8XX-"/> + <reg32 offset="0x0732" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS_US" variants="A8XX-"/> + <reg32 offset="0x0740" name="DBGC_CFG_BV_PERF_COUNTER_SEL_NONE_US" variants="A8XX-"/> + <reg32 offset="0x0742" name="DBGC_CFG_PERF_TIMESTAMP_TRIG_SEL_US" variants="A8XX-"/> + <reg32 offset="0x0743" name="DBGC_CFG_BV_PERF_TIMESTAMP_TRIG_SEL_US" variants="A8XX-"/> + <reg64 offset="0x0744" name="DBGC_CFG_GBIF_BR_PERF_CNTR_BASE" variants="A8XX-"/> + <reg32 offset="0x0746" name="DBGC_CFG_GBIF_BR_BUFFER_SIZE" variants="A8XX-"/> + <reg64 offset="0x0747" 
name="DBGC_CFG_GBIF_BV_PERF_CNTR_BASE" variants="A8XX-"/> + <reg32 offset="0x0749" name="DBGC_CFG_GBIF_BV_BUFFER_SIZE" variants="A8XX-"/> + <reg32 offset="0x074a" name="DBGC_CFG_GBIF_QOS_CTRL" variants="A8XX-"/> + <reg32 offset="0x0750" name="DBGC_GBIF_BR_PERF_CNTR_WRITE_POINTER" variants="A8XX-"/> + <reg32 offset="0x0751" name="DBGC_GBIF_BV_PERF_CNTR_WRITE_POINTER" variants="A8XX-"/> + <reg32 offset="0x0752" name="DBGC_PERF_COUNTER_FE_LOCAL_BATCH_ID" variants="A8XX-"/> + <reg32 offset="0x0753" name="DBGC_CFG_PERF_WAIT_IDLE_CLOCKS_CNTL" variants="A8XX-"/> + <reg32 offset="0x0754" name="DBGC_PERF_COUNTER_SCOPING_CMD_US" variants="A8XX-"/> + <reg32 offset="0x0755" name="DBGC_PERF_SKEW_BUFFER_INIT_CMD" variants="A8XX-"/> + <reg32 offset="0x0759" name="DBGC_LOPC_INTERRUPT_STATUS" variants="A8XX-"/> + <reg32 offset="0x075a" name="DBGC_LOPC_BUFFER_PTR_STATUS" variants="A8XX-"/> + <reg32 offset="0x075b" name="DBGC_PERF_SCOPING_STATUS" variants="A8XX-"/> + <reg32 offset="0x075c" name="DBGC_PERF_COUNTER_PKT_STATUS" variants="A8XX-"/> + <reg32 offset="0x0760" name="DBGC_GC_LIVE_MBX_PKT_STATUS" variants="A8XX-"/> + <reg32 offset="0x0761" name="DBGC_GC_ALW_MBX_PKT_STATUS" variants="A8XX-"/> + <reg32 offset="0x0762" name="DBGC_AO_CNTR_LO_STATUS" variants="A8XX-"/> + <reg32 offset="0x0763" name="DBGC_AO_CNTR_HI_STATUS" variants="A8XX-"/> + <reg32 offset="0x0770" name="DBGC_LOPC_GC_SB_DEPTH_STATUS" variants="A8XX-"/> + <reg32 offset="0x0780" name="DBGC_LPAC_SCOPE_PERF_COUNTER_CFG_US" variants="A8XX-"/> + <reg32 offset="0x0781" name="DBGC_CFG_PERF_TRIG_LPAC_US" variants="A8XX-"/> + <reg32 offset="0x0782" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_US" variants="A8XX-"/> + <reg32 offset="0x0783" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_US_1" variants="A8XX-"/> + <reg32 offset="0x0784" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_US_2" variants="A8XX-"/> + <reg32 offset="0x0785" name="DBGC_CFG_PERF_TIMESTAMP_TRIG_SEL_LPAC_US" variants="A8XX-"/> + <reg64 offset="0x0786" name="DBGC_CFG_GBIF_LPAC_PERF_CNTR_BASE" variants="A8XX-"/> + <reg32 offset="0x0788" name="DBGC_CFG_GBIF_LPAC_BUFFER_SIZE" variants="A8XX-"/> + <reg32 offset="0x0789" name="DBGC_GBIF_LPAC_PERF_CNTR_WRITE_POINTER" variants="A8XX-"/> + <reg32 offset="0x078a" name="DBGC_CFG_LPAC_PERF_WAIT_IDLE_CLOCKS_CNTL" variants="A8XX-"/> + <reg32 offset="0x078b" name="DBGC_LPAC_PERF_COUNTER_SCOPING_CMD_US" variants="A8XX-"/> + <reg32 offset="0x078c" name="DBGC_LPAC_MBX_PKT_STATUS" variants="A8XX-"/> + <reg32 offset="0x078d" name="DBGC_LPAC_PERF_SCOPING_STATUS" variants="A8XX-"/> + <reg32 offset="0x0790" name="DBGC_LOPC_LPAC_SB_DEPTH_STATUS" variants="A8XX-"/> + <reg32 offset="0x07a0" name="DBGC_SCOPE_PERF_COUNTER_CFG_S" variants="A8XX-"/> + <reg32 offset="0x07a1" name="DBGC_CFG_PERF_TRIG_CLUSTER_FE_S" variants="A8XX-"/> + <reg32 offset="0x07a2" name="DBGC_CFG_PERF_TRIG_CLUSTER_SP_VS" variants="A8XX-"/> + <reg32 offset="0x07a3" name="DBGC_CFG_PERF_TRIG_CLUSTER_VPC_VS" variants="A8XX-"/> + <reg32 offset="0x07a4" name="DBGC_CFG_PERF_TRIG_CLUSTER_GRAS" variants="A8XX-"/> + <reg32 offset="0x07a5" name="DBGC_CFG_PERF_TRIG_CLUSTER_SP_PS" variants="A8XX-"/> + <reg32 offset="0x07a6" name="DBGC_CFG_PERF_TRIG_CLUSTER_VPC_PS" variants="A8XX-"/> + <reg32 offset="0x07a7" name="DBGC_CFG_PERF_TRIG_CLUSTER_PS" variants="A8XX-"/> + <reg32 offset="0x07a8" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_FE_S" variants="A8XX-"/> + <reg32 offset="0x07a9" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_SP_VS" variants="A8XX-"/> + <reg32 offset="0x07aa" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_VPC_VS" variants="A8XX-"/> + <reg32 
offset="0x07ab" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_GRAS" variants="A8XX-"/> + <reg32 offset="0x07ac" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_VPC_PS" variants="A8XX-"/> + <reg32 offset="0x07ad" name="DBGC_CFG_PERF_COUNTER_SEL_FE_S" variants="A8XX-"/> + <reg32 offset="0x07ae" name="DBGC_CFG_PERF_COUNTER_SEL_FE_S_1" variants="A8XX-"/> + <reg32 offset="0x07af" name="DBGC_CFG_PERF_COUNTER_SEL_FE_S_2" variants="A8XX-"/> + <reg32 offset="0x07b0" name="DBGC_CFG_PERF_COUNTER_SEL_FE_S_3" variants="A8XX-"/> + <reg32 offset="0x07b1" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS" variants="A8XX-"/> + <reg32 offset="0x07b2" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS_1" variants="A8XX-"/> + <reg32 offset="0x07b3" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS_2" variants="A8XX-"/> + <reg32 offset="0x07b4" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS_3" variants="A8XX-"/> + <reg32 offset="0x07b5" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_VS" variants="A8XX-"/> + <reg32 offset="0x07b6" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_VS_1" variants="A8XX-"/> + <reg32 offset="0x07b7" name="DBGC_CFG_PERF_COUNTER_SEL_GRAS" variants="A8XX-"/> + <reg32 offset="0x07b8" name="DBGC_CFG_PERF_COUNTER_SEL_GRAS_1" variants="A8XX-"/> + <reg32 offset="0x07b9" name="DBGC_CFG_PERF_COUNTER_SEL_GRAS_2" variants="A8XX-"/> + <reg32 offset="0x07ba" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS" variants="A8XX-"/> + <reg32 offset="0x07bb" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS_1" variants="A8XX-"/> + <reg32 offset="0x07bc" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS_2" variants="A8XX-"/> + <reg32 offset="0x07bd" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS_3" variants="A8XX-"/> + <reg32 offset="0x07be" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_PS" variants="A8XX-"/> + <reg32 offset="0x07bf" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_PS_1" variants="A8XX-"/> + <reg32 offset="0x07c0" name="DBGC_CFG_PERF_COUNTER_SEL_PS" variants="A8XX-"/> + <reg32 offset="0x07c1" name="DBGC_CFG_PERF_COUNTER_SEL_PS_1" variants="A8XX-"/> + <reg32 offset="0x07c2" name="DBGC_CFG_PERF_COUNTER_SEL_PS_2" variants="A8XX-"/> + <reg32 offset="0x07c3" name="DBGC_CFG_PERF_COUNTER_SEL_PS_3" variants="A8XX-"/> + <reg32 offset="0x07c4" name="DBGC_CFG_PERF_TIMESTAMP_TRIG_SEL_S" variants="A8XX-"/> + <reg32 offset="0x07c5" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_S" variants="A8XX-"/> + <reg32 offset="0x07c6" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_S_1" variants="A8XX-"/> + <reg32 offset="0x07c7" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_S_2" variants="A8XX-"/> + <reg32 offset="0x07c8" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_S_3" variants="A8XX-"/> + <reg32 offset="0x07c9" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS" variants="A8XX-"/> + <reg32 offset="0x07ca" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS_1" variants="A8XX-"/> + <reg32 offset="0x07cb" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS_2" variants="A8XX-"/> + <reg32 offset="0x07cc" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS_3" variants="A8XX-"/> + <reg32 offset="0x07cd" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_VS" variants="A8XX-"/> + <reg32 offset="0x07ce" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_VS_1" variants="A8XX-"/> + <reg32 offset="0x07cf" name="DBGC_CFG_BV_PERF_COUNTER_SEL_GRAS" variants="A8XX-"/> + <reg32 offset="0x07d0" name="DBGC_CFG_BV_PERF_COUNTER_SEL_GRAS_1" variants="A8XX-"/> + <reg32 offset="0x07d1" name="DBGC_CFG_BV_PERF_COUNTER_SEL_GRAS_2" variants="A8XX-"/> + <reg32 offset="0x07d2" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_PS" variants="A8XX-"/> + <reg32 offset="0x07d3" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_PS_1" variants="A8XX-"/> + <reg32 offset="0x07d4" name="DBGC_CFG_BV_PERF_TIMESTAMP_TRIG_SEL_S" 
variants="A8XX-"/> + <reg32 offset="0x07d5" name="DBGC_PERF_COUNTER_SCOPING_CMD_S" variants="A8XX-"/> + <reg32 offset="0x07e0" name="DBGC_LPAC_SCOPE_PERF_COUNTER_CFG_S" variants="A8XX-"/> + <reg32 offset="0x07e1" name="DBGC_CFG_PERF_TRIG_LPAC_S" variants="A8XX-"/> + <reg32 offset="0x07e2" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_S" variants="A8XX-"/> + <reg32 offset="0x07e3" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_S_1" variants="A8XX-"/> + <reg32 offset="0x07e4" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_S_2" variants="A8XX-"/> + <reg32 offset="0x07e5" name="DBGC_CFG_PERF_TIMESTAMP_TRIG_SEL_LPAC_S" variants="A8XX-"/> + <reg32 offset="0x07e6" name="DBGC_LPAC_PERF_COUNTER_SCOPING_CMD_S" variants="A8XX-"/> + + <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2"/> <reg32 offset="0xC800" name="HLSQ_DBG_AHB_READ_APERTURE"/> <reg32 offset="0xD000" name="HLSQ_DBG_READ_SEL"/> - <reg32 offset="0x0E00" name="UCHE_ADDR_MODE_CNTL" type="a5xx_address_mode"/> + <reg32 offset="0x0E00" name="UCHE_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/> <reg32 offset="0x0E01" name="UCHE_MODE_CNTL"/> - <reg64 offset="0x0E05" name="UCHE_WRITE_RANGE_MAX"/> - <reg64 offset="0x0E07" name="UCHE_WRITE_THRU_BASE"/> - <reg64 offset="0x0E09" name="UCHE_TRAP_BASE"/> - <reg64 offset="0x0E0B" name="UCHE_GMEM_RANGE_MIN"/> - <reg64 offset="0x0E0D" name="UCHE_GMEM_RANGE_MAX"/> - <reg32 offset="0x0E17" name="UCHE_CACHE_WAYS" usage="cmd"/> + <reg64 offset="0x0E05" name="UCHE_WRITE_RANGE_MAX" variants="A6XX"/> + <reg64 offset="0x0E07" name="UCHE_WRITE_THRU_BASE" variants="A6XX-A7XX"/> + <reg64 offset="0x0E06" name="UCHE_WRITE_THRU_BASE" variants="A8XX-"/> + <reg64 offset="0x0E09" name="UCHE_TRAP_BASE" variants="A6XX-A7XX"/> + <reg64 offset="0x0E08" name="UCHE_TRAP_BASE" variants="A8XX-"/> + <reg64 offset="0x0E0B" name="UCHE_GMEM_RANGE_MIN" variants="A6XX-A7XX"/> + <reg64 offset="0x0E0D" name="UCHE_GMEM_RANGE_MAX" variants="A6XX-A7XX"/> + <reg32 offset="0x0e17" name="UCHE_CACHE_WAYS" variants="A6XX-A7XX" usage="init"/> + <reg32 offset="0x0e04" name="UCHE_CACHE_WAYS" variants="A8XX-"/> <reg32 offset="0x0E18" name="UCHE_FILTER_CNTL"/> - <reg32 offset="0x0E19" name="UCHE_CLIENT_PF" usage="cmd"> + <reg32 offset="0x0e19" name="UCHE_CLIENT_PF" variants="A6XX-A7XX" usage="init"> <bitfield high="7" low="0" name="PERFSEL"/> </reg32> - <array offset="0x0E1C" name="UCHE_PERFCTR_UCHE_SEL" stride="1" length="12"/> - <reg32 offset="0x0e3a" name="UCHE_GBIF_GX_CONFIG"/> - <reg32 offset="0x0e3c" name="UCHE_CMDQ_CONFIG"/> + <array offset="0x0e1c" name="UCHE_PERFCTR_UCHE_SEL" stride="1" length="12" variants="A6XX-A7XX"/> + <array offset="0x0e20" name="UCHE_PERFCTR_UCHE_SEL" stride="1" length="24" variants="A8XX-"/> + <reg32 offset="0x0e3a" name="UCHE_GBIF_GX_CONFIG" variants="A6XX-A7XX"/> + <reg32 offset="0x0e12" name="UCHE_GBIF_GX_CONFIG" variants="A8XX-"/> + <reg32 offset="0x0e3c" name="UCHE_CMDQ_CONFIG" variants="A6XX-A7XX"/> - <reg32 offset="0x3000" name="VBIF_VERSION"/> - <reg32 offset="0x3001" name="VBIF_CLKON"> + <reg32 offset="0x0f01" name="UCHE_CCHE_MODE_CNTL" variants="A8XX-"/> + <reg32 offset="0x0f02" name="UCHE_CCHE_CACHE_WAYS" variants="A8XX-"/> + <reg64 offset="0x0f04" name="UCHE_CCHE_WRITE_THRU_BASE" variants="A8XX-"/> + <reg64 offset="0x0f06" name="UCHE_CCHE_TRAP_BASE" variants="A8XX-"/> + <reg64 offset="0x0f08" name="UCHE_CCHE_GC_GMEM_RANGE_MIN" variants="A8XX-"/> + <reg64 offset="0x0f0a" name="UCHE_CCHE_LPAC_GMEM_RANGE_MIN" variants="A8XX-"/> + <reg32 offset="0x0f0c" name="UCHE_CCHE_HW_DBG_CNTL" variants="A8XX-"/> + + 
<!-- VBIF only existed on early a6xx, and was later replaced with GBIF --> + + <reg32 offset="0x3000" name="VBIF_VERSION" variants="A6XX"/> + <reg32 offset="0x3001" name="VBIF_CLKON" variants="A6XX"> <bitfield pos="1" name="FORCE_ON_TESTBUS" type="boolean"/> </reg32> - <reg32 offset="0x302A" name="VBIF_GATE_OFF_WRREQ_EN"/> - <reg32 offset="0x3080" name="VBIF_XIN_HALT_CTRL0"/> - <reg32 offset="0x3081" name="VBIF_XIN_HALT_CTRL1"/> - <reg32 offset="0x3084" name="VBIF_TEST_BUS_OUT_CTRL"/> - <reg32 offset="0x3085" name="VBIF_TEST_BUS1_CTRL0"/> - <reg32 offset="0x3086" name="VBIF_TEST_BUS1_CTRL1"> + <reg32 offset="0x302A" name="VBIF_GATE_OFF_WRREQ_EN" variants="A6XX"/> + <reg32 offset="0x3080" name="VBIF_XIN_HALT_CTRL0" variants="A6XX"/> + <reg32 offset="0x3081" name="VBIF_XIN_HALT_CTRL1" variants="A6XX"/> + <reg32 offset="0x3084" name="VBIF_TEST_BUS_OUT_CTRL" variants="A6XX"/> + <reg32 offset="0x3085" name="VBIF_TEST_BUS1_CTRL0" variants="A6XX"/> + <reg32 offset="0x3086" name="VBIF_TEST_BUS1_CTRL1" variants="A6XX"> <bitfield low="0" high="3" name="DATA_SEL"/> </reg32> - <reg32 offset="0x3087" name="VBIF_TEST_BUS2_CTRL0"/> - <reg32 offset="0x3088" name="VBIF_TEST_BUS2_CTRL1"> + <reg32 offset="0x3087" name="VBIF_TEST_BUS2_CTRL0" variants="A6XX"/> + <reg32 offset="0x3088" name="VBIF_TEST_BUS2_CTRL1" variants="A6XX"> <bitfield low="0" high="8" name="DATA_SEL"/> </reg32> - <reg32 offset="0x308c" name="VBIF_TEST_BUS_OUT"/> - <reg32 offset="0x30d0" name="VBIF_PERF_CNT_SEL0"/> - <reg32 offset="0x30d1" name="VBIF_PERF_CNT_SEL1"/> - <reg32 offset="0x30d2" name="VBIF_PERF_CNT_SEL2"/> - <reg32 offset="0x30d3" name="VBIF_PERF_CNT_SEL3"/> - <reg32 offset="0x30d8" name="VBIF_PERF_CNT_LOW0"/> - <reg32 offset="0x30d9" name="VBIF_PERF_CNT_LOW1"/> - <reg32 offset="0x30da" name="VBIF_PERF_CNT_LOW2"/> - <reg32 offset="0x30db" name="VBIF_PERF_CNT_LOW3"/> - <reg32 offset="0x30e0" name="VBIF_PERF_CNT_HIGH0"/> - <reg32 offset="0x30e1" name="VBIF_PERF_CNT_HIGH1"/> - <reg32 offset="0x30e2" name="VBIF_PERF_CNT_HIGH2"/> - <reg32 offset="0x30e3" name="VBIF_PERF_CNT_HIGH3"/> - <reg32 offset="0x3100" name="VBIF_PERF_PWR_CNT_EN0"/> - <reg32 offset="0x3101" name="VBIF_PERF_PWR_CNT_EN1"/> - <reg32 offset="0x3102" name="VBIF_PERF_PWR_CNT_EN2"/> - <reg32 offset="0x3110" name="VBIF_PERF_PWR_CNT_LOW0"/> - <reg32 offset="0x3111" name="VBIF_PERF_PWR_CNT_LOW1"/> - <reg32 offset="0x3112" name="VBIF_PERF_PWR_CNT_LOW2"/> - <reg32 offset="0x3118" name="VBIF_PERF_PWR_CNT_HIGH0"/> - <reg32 offset="0x3119" name="VBIF_PERF_PWR_CNT_HIGH1"/> - <reg32 offset="0x311a" name="VBIF_PERF_PWR_CNT_HIGH2"/> - + <reg32 offset="0x308c" name="VBIF_TEST_BUS_OUT" variants="A6XX"/> + <reg32 offset="0x30d0" name="VBIF_PERF_CNT_SEL0" variants="A6XX"/> + <reg32 offset="0x30d1" name="VBIF_PERF_CNT_SEL1" variants="A6XX"/> + <reg32 offset="0x30d2" name="VBIF_PERF_CNT_SEL2" variants="A6XX"/> + <reg32 offset="0x30d3" name="VBIF_PERF_CNT_SEL3" variants="A6XX"/> + <reg32 offset="0x30d8" name="VBIF_PERF_CNT_LOW0" variants="A6XX"/> + <reg32 offset="0x30d9" name="VBIF_PERF_CNT_LOW1" variants="A6XX"/> + <reg32 offset="0x30da" name="VBIF_PERF_CNT_LOW2" variants="A6XX"/> + <reg32 offset="0x30db" name="VBIF_PERF_CNT_LOW3" variants="A6XX"/> + <reg32 offset="0x30e0" name="VBIF_PERF_CNT_HIGH0" variants="A6XX"/> + <reg32 offset="0x30e1" name="VBIF_PERF_CNT_HIGH1" variants="A6XX"/> + <reg32 offset="0x30e2" name="VBIF_PERF_CNT_HIGH2" variants="A6XX"/> + <reg32 offset="0x30e3" name="VBIF_PERF_CNT_HIGH3" variants="A6XX"/> + <reg32 offset="0x3100" name="VBIF_PERF_PWR_CNT_EN0" 
variants="A6XX"/> + <reg32 offset="0x3101" name="VBIF_PERF_PWR_CNT_EN1" variants="A6XX"/> + <reg32 offset="0x3102" name="VBIF_PERF_PWR_CNT_EN2" variants="A6XX"/> + <reg32 offset="0x3110" name="VBIF_PERF_PWR_CNT_LOW0" variants="A6XX"/> + <reg32 offset="0x3111" name="VBIF_PERF_PWR_CNT_LOW1" variants="A6XX"/> + <reg32 offset="0x3112" name="VBIF_PERF_PWR_CNT_LOW2" variants="A6XX"/> + <reg32 offset="0x3118" name="VBIF_PERF_PWR_CNT_HIGH0" variants="A6XX"/> + <reg32 offset="0x3119" name="VBIF_PERF_PWR_CNT_HIGH1" variants="A6XX"/> + <reg32 offset="0x311a" name="VBIF_PERF_PWR_CNT_HIGH2" variants="A6XX"/> + + <reg32 offset="0x3c00" name="GBIF_CX_CONFIG" variants="A8XX-"/> <reg32 offset="0x3c01" name="GBIF_SCACHE_CNTL0"/> <reg32 offset="0x3c02" name="GBIF_SCACHE_CNTL1"/> <reg32 offset="0x3c03" name="GBIF_QSB_SIDE0"/> @@ -712,30 +1272,66 @@ by a particular renderpass/blit. <reg32 offset="0x3c06" name="GBIF_QSB_SIDE3"/> <reg32 offset="0x3c45" name="GBIF_HALT"/> <reg32 offset="0x3c46" name="GBIF_HALT_ACK"/> + <reg32 offset="0x3c49" name="GBIF_REINIT_ENABLE" variants="A8XX-"/> + <reg32 offset="0x3c4a" name="GBIF_REINIT_DONE" variants="A8XX-"/> <reg32 offset="0x3cc0" name="GBIF_PERF_PWR_CNT_EN"/> <reg32 offset="0x3cc1" name="GBIF_PERF_PWR_CNT_CLR"/> <reg32 offset="0x3cc2" name="GBIF_PERF_CNT_SEL"/> - <reg32 offset="0x3cc3" name="GBIF_PERF_PWR_CNT_SEL"/> - <reg32 offset="0x3cc4" name="GBIF_PERF_CNT_LOW0"/> - <reg32 offset="0x3cc5" name="GBIF_PERF_CNT_LOW1"/> - <reg32 offset="0x3cc6" name="GBIF_PERF_CNT_LOW2"/> - <reg32 offset="0x3cc7" name="GBIF_PERF_CNT_LOW3"/> - <reg32 offset="0x3cc8" name="GBIF_PERF_CNT_HIGH0"/> - <reg32 offset="0x3cc9" name="GBIF_PERF_CNT_HIGH1"/> - <reg32 offset="0x3cca" name="GBIF_PERF_CNT_HIGH2"/> - <reg32 offset="0x3ccb" name="GBIF_PERF_CNT_HIGH3"/> - <reg32 offset="0x3ccc" name="GBIF_PWR_CNT_LOW0"/> - <reg32 offset="0x3ccd" name="GBIF_PWR_CNT_LOW1"/> - <reg32 offset="0x3cce" name="GBIF_PWR_CNT_LOW2"/> - <reg32 offset="0x3ccf" name="GBIF_PWR_CNT_HIGH0"/> - <reg32 offset="0x3cd0" name="GBIF_PWR_CNT_HIGH1"/> - <reg32 offset="0x3cd1" name="GBIF_PWR_CNT_HIGH2"/> + <reg32 offset="0x3cc3" name="GBIF_PERF_CNT_SEL_1" variants="A8XX-"/> + + <reg32 offset="0x3cc3" name="GBIF_PERF_PWR_CNT_SEL" variants="A6XX-A7XX"/> + <reg32 offset="0x3cc4" name="GBIF_PERF_CNT_LOW0" variants="A6XX-A7XX"/> + <reg32 offset="0x3cc5" name="GBIF_PERF_CNT_LOW1" variants="A6XX-A7XX"/> + <reg32 offset="0x3cc6" name="GBIF_PERF_CNT_LOW2" variants="A6XX-A7XX"/> + <reg32 offset="0x3cc7" name="GBIF_PERF_CNT_LOW3" variants="A6XX-A7XX"/> + <reg32 offset="0x3cc8" name="GBIF_PERF_CNT_HIGH0" variants="A6XX-A7XX"/> + <reg32 offset="0x3cc9" name="GBIF_PERF_CNT_HIGH1" variants="A6XX-A7XX"/> + <reg32 offset="0x3cca" name="GBIF_PERF_CNT_HIGH2" variants="A6XX-A7XX"/> + <reg32 offset="0x3ccb" name="GBIF_PERF_CNT_HIGH3" variants="A6XX-A7XX"/> + <reg32 offset="0x3ccc" name="GBIF_PWR_CNT_LOW0" variants="A6XX-A7XX"/> + <reg32 offset="0x3ccd" name="GBIF_PWR_CNT_LOW1" variants="A6XX-A7XX"/> + <reg32 offset="0x3cce" name="GBIF_PWR_CNT_LOW2" variants="A6XX-A7XX"/> + <reg32 offset="0x3ccf" name="GBIF_PWR_CNT_HIGH0" variants="A6XX-A7XX"/> + <reg32 offset="0x3cd0" name="GBIF_PWR_CNT_HIGH1" variants="A6XX-A7XX"/> + <reg32 offset="0x3cd1" name="GBIF_PWR_CNT_HIGH2" variants="A6XX-A7XX"/> + + <reg32 offset="0x3cc4" name="GBIF_PWR_CNT_SEL" variants="A8XX"/> + <reg32 offset="0x3cc6" name="GBIF_PERF_CNT_LO_0" variants="A8XX"/> + <reg32 offset="0x3cc7" name="GBIF_PERF_CNT_HI_0" variants="A8XX"/> + <reg32 offset="0x3cc8" name="GBIF_PERF_CNT_LO_1" 
variants="A8XX"/> + <reg32 offset="0x3cc9" name="GBIF_PERF_CNT_HI_1" variants="A8XX"/> + <reg32 offset="0x3cca" name="GBIF_PERF_CNT_LO_2" variants="A8XX"/> + <reg32 offset="0x3ccb" name="GBIF_PERF_CNT_HI_2" variants="A8XX"/> + <reg32 offset="0x3ccc" name="GBIF_PERF_CNT_LO_3" variants="A8XX"/> + <reg32 offset="0x3ccd" name="GBIF_PERF_CNT_HI_3" variants="A8XX"/> + <reg32 offset="0x3cce" name="GBIF_PERF_CNT_LO_4" variants="A8XX"/> + <reg32 offset="0x3ccf" name="GBIF_PERF_CNT_HI_4" variants="A8XX"/> + <reg32 offset="0x3cd0" name="GBIF_PERF_CNT_LO_5" variants="A8XX"/> + <reg32 offset="0x3cd1" name="GBIF_PERF_CNT_HI_5" variants="A8XX"/> + <reg32 offset="0x3cd2" name="GBIF_PERF_CNT_LO_6" variants="A8XX"/> + <reg32 offset="0x3cd3" name="GBIF_PERF_CNT_HI_6" variants="A8XX"/> + <reg32 offset="0x3cd4" name="GBIF_PERF_CNT_LO_7" variants="A8XX"/> + <reg32 offset="0x3cd5" name="GBIF_PERF_CNT_HI_7" variants="A8XX"/> + <reg32 offset="0x3ce0" name="GBIF_PWR_CNT_LO_0" variants="A8XX"/> + <reg32 offset="0x3ce1" name="GBIF_PWR_CNT_LO_1" variants="A8XX"/> + <reg32 offset="0x3ce2" name="GBIF_PWR_CNT_LO_2" variants="A8XX"/> + <reg32 offset="0x3ce3" name="GBIF_PWR_CNT_HI_0" variants="A8XX"/> + <reg32 offset="0x3ce4" name="GBIF_PWR_CNT_HI_1" variants="A8XX"/> + <reg32 offset="0x3ce5" name="GBIF_PWR_CNT_HI_2" variants="A8XX"/> <reg32 offset="0x0c00" name="VSC_DBG_ECO_CNTL"/> - <reg32 offset="0x0c02" name="VSC_BIN_SIZE" usage="rp_blit"> - <bitfield name="WIDTH" low="0" high="7" shr="5" type="uint"/> - <bitfield name="HEIGHT" low="8" high="16" shr="4" type="uint"/> + <reg32 offset="0x0df0" name="VSC_KMD_DBG_ECO_CNTL" variants="A8XX-"/> + <reg32 offset="0x0c02" name="VSC_BIN_SIZE" usage="rp_blit" variants="A6XX-A7XX"> + <bitfield name="BINW" low="0" high="7" shr="5" type="uint"/> + <bitfield name="BINH" low="8" high="16" shr="4" type="uint"/> </reg32> + + <bitset name="a8xx_bin_size" inline="yes"> + <bitfield name="BINW" low="0" high="9" shr="5" type="uint"/> + <bitfield name="BINH" low="16" high="26" shr="4" type="uint"/> + </bitset> + + <reg32 offset="0x0c02" name="VSC_BIN_SIZE" type="a8xx_bin_size" usage="rp_blit" variants="A8XX"/> <reg64 offset="0x0c03" name="VSC_SIZE_BASE" type="waddress" usage="cmd"/> <reg32 offset="0x0c06" name="VSC_EXPANDED_BIN_CNTL" usage="rp_blit"> <bitfield name="NX" low="1" high="10" type="uint"/> @@ -803,10 +1399,14 @@ by a particular renderpass/blit. <reg32 offset="0x0d08" name="VSC_UNKNOWN_0D08" variants="A7XX-" usage="rp_blit"/> - <reg32 offset="0x0E10" name="UCHE_UNKNOWN_0E10" variants="A7XX-" usage="cmd"/> - <reg32 offset="0x0E11" name="UCHE_UNKNOWN_0E11" variants="A7XX-" usage="cmd"/> + <reg32 offset="0x0e10" name="UCHE_UNKNOWN_0E10" variants="A7XX" usage="init"/> + <reg32 offset="0x0e10" name="UCHE_VARB_IDLE_TIMEOUT" variants="A8XX-"/> + <reg32 offset="0x0e11" name="UCHE_UNKNOWN_0E11" variants="A7XX" usage="init"/> + <reg32 offset="0x0e11" name="UCHE_CLIENT_PF" variants="A8XX-"/> <!-- always 0x03200000 ? --> - <reg32 offset="0x0e12" name="UCHE_UNKNOWN_0E12" usage="cmd"/> + <reg32 offset="0x0e12" name="UCHE_UNKNOWN_0E12" variants="A6XX-A7XX" usage="init"/> + <reg32 offset="0x0e15" name="UCHE_DBG_ECO_CNTL_0" variants="A8XX-"/> + <reg32 offset="0x0e16" name="UCHE_HW_DBG_CNTL" variants="A8XX-"/> <!-- adreno_reg_xy has 15 bits per coordinate, but a6xx registers only have 14 --> <bitset name="a6xx_reg_xy" inline="yes"> @@ -829,6 +1429,7 @@ by a particular renderpass/blit. 
</bitset> <reg32 offset="0x8000" name="GRAS_CL_CNTL" type="a6xx_gras_cl_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8200" name="GRAS_CL_CNTL" type="a6xx_gras_cl_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_gras_xs_clip_cull_distance" inline="yes"> <bitfield name="CLIP_MASK" low="0" high="7"/> @@ -839,6 +1440,18 @@ by a particular renderpass/blit. <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" /> <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit" variants="A6XX-A7XX" /> + <reg32 offset="0x8201" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A8XX" /> + <reg32 offset="0x8202" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A8XX" /> + <reg32 offset="0x8203" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A8XX" /> + <reg32 offset="0x8204" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit" variants="A8XX" /> + + <reg32 offset="0x8228" name="GRAS_UNKNOWN_8228" variants="A8XX-"/> + <reg32 offset="0x8229" name="GRAS_UNKNOWN_8229" variants="A8XX-"/> + <reg32 offset="0x822a" name="GRAS_UNKNOWN_822A" variants="A8XX-"/> + <reg32 offset="0x822b" name="GRAS_UNKNOWN_822B" variants="A8XX-"/> + <reg32 offset="0x822c" name="GRAS_UNKNOWN_822C" variants="A8XX-"/> + <reg32 offset="0x822d" name="GRAS_UNKNOWN_822D" variants="A8XX-"/> + <bitset name="a6xx_gras_cl_interp_cntl" inline="yes"> <!-- see also RB_INTERP_CNTL --> <bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/> @@ -853,6 +1466,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" type="a6xx_gras_cl_interp_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8080" name="GRAS_CL_INTERP_CNTL" type="a6xx_gras_cl_interp_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_gras_cl_guardband_clip_adj" inline="true"> <bitfield name="HORZ" low="0" high="8" type="uint"/> @@ -860,9 +1474,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x8006" name="GRAS_CL_GUARDBAND_CLIP_ADJ" type="a6xx_gras_cl_guardband_clip_adj" variants="A6XX-A7XX" usage="rp_blit"/> - - <!-- Something connected to depth-stencil attachment size --> - <reg32 offset="0x8007" name="GRAS_UNKNOWN_8007" variants="A7XX-" usage="rp_blit"/> + <reg32 offset="0x8205" name="GRAS_CL_GUARDBAND_CLIP_ADJ" type="a6xx_gras_cl_guardband_clip_adj" variants="A8XX-" usage="rp_blit"/> <!-- the scale/offset is per view, with up to 6 views --> <bitset name="a6xx_gras_bin_foveat" inline="yes"> @@ -887,6 +1499,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x8008" name="GRAS_BIN_FOVEAT" type="a6xx_gras_bin_foveat" variants="A7XX" usage="cmd"/> + <reg32 offset="0x8206" name="GRAS_BIN_FOVEAT" type="a6xx_gras_bin_foveat" variants="A8XX-" usage="cmd"/> <reg32 offset="0x8009" name="GRAS_BIN_FOVEAT_OFFSET_0" variants="A7XX-" usage="cmd"> <bitfield name="XOFFSET_0" low="0" high="9" shr="2" type="uint"/> @@ -921,10 +1534,23 @@ by a particular renderpass/blit. 
<reg32 offset="5" name="ZSCALE" type="float"/> </array> + <array offset="0x82d0" name="GRAS_CL_VIEWPORT" stride="6" length="16" variants="A8XX-" usage="rp_blit"> + <reg32 offset="0" name="XOFFSET" type="float"/> + <reg32 offset="1" name="XSCALE" type="float"/> + <reg32 offset="2" name="YOFFSET" type="float"/> + <reg32 offset="3" name="YSCALE" type="float"/> + <reg32 offset="4" name="ZOFFSET" type="float"/> + <reg32 offset="5" name="ZSCALE" type="float"/> + </array> + <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit"> <reg32 offset="0" name="MIN" type="float"/> <reg32 offset="1" name="MAX" type="float"/> </array> + <array offset="0x80c0" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" variants="A8XX-" usage="rp_blit"> + <reg32 offset="0" name="MIN" type="float"/> + <reg32 offset="1" name="MAX" type="float"/> + </array> <bitset name="a6xx_gras_su_cntl" varset="chip"> <bitfield name="CULL_FRONT" pos="0" type="boolean"/> @@ -951,6 +1577,13 @@ by a particular renderpass/blit. <bitfield name="UNK20" low="20" high="22" variants="A6XX-A7XX"/> </bitset> <reg32 offset="0x8090" name="GRAS_SU_CNTL" type="a6xx_gras_su_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8209" name="GRAS_SU_CNTL" type="a6xx_gras_su_cntl" variants="A8XX-" usage="rp_blit"/> + + <!-- Fields moved from GRAS_SU_CNTL on earlier gens: --> + <reg32 offset="0x820c" name="GRAS_SU_STEREO_CNTL" variants="A8XX-" usage="rp_blit"> + <bitfield name="RENDERTARGETINDEXINCR" pos="18" type="boolean"/> + <bitfield name="VIEWPORTINDEXINCR" pos="19" type="boolean"/> + </reg32> <bitset name="a6xx_gras_su_point_minmax" inline="yes"> <bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/> @@ -958,25 +1591,31 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x8091" name="GRAS_SU_POINT_MINMAX" type="a6xx_gras_su_point_minmax" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x820a" name="GRAS_SU_POINT_MINMAX" type="a6xx_gras_su_point_minmax" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8092" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x820b" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_gras_su_depth_cntl" inline="yes"> <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/> </bitset> <reg32 offset="0x8114" name="GRAS_SU_DEPTH_CNTL" variants="A6XX-A7XX" type="a6xx_gras_su_depth_cntl" usage="rp_blit"/> + <reg32 offset="0x8086" name="GRAS_SU_DEPTH_CNTL" variants="A8XX-" type="a6xx_gras_su_depth_cntl" usage="rp_blit"/> <bitset name="a6xx_gras_su_stencil_cntl" inline="yes"> <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/> </bitset> <reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" type="a6xx_gras_su_stencil_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8087" name="GRAS_SU_STENCIL_CNTL" type="a6xx_gras_su_stencil_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_gras_su_render_cntl" inline="yes"> <bitfield name="FS_DISABLE" pos="7" type="boolean"/> </bitset> <reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" type="a6xx_gras_su_render_cntl" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x8088" name="GRAS_SU_RENDER_CNTL" type="a6xx_gras_su_render_cntl" variants="A8XX-" usage="rp_blit"/> <!-- 0x8093 invalid --> <bitset name="a6xx_depth_plane_cntl" inline="yes"> @@ -984,16 +1623,25 @@ by a particular renderpass/blit. 
</bitset> <reg32 offset="0x8094" name="GRAS_SU_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8089" name="GRAS_SU_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x808a" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x808b" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x808c" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" variants="A8XX-" usage="rp_blit"/> + <bitset name="a6xx_depth_buffer_info" inline="yes"> <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/> - <bitfield name="UNK3" pos="3"/> + <bitfield name="READ_ONLY" pos="3" type="boolean"/> </bitset> <!-- duplicates RB_DEPTH_BUFFER_INFO: --> <reg32 offset="0x8098" name="GRAS_SU_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x808d" name="GRAS_SU_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_gras_su_conservative_ras_cntl" inline="yes"> <bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/> @@ -1008,6 +1656,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_gras_su_conservative_ras_cntl" variants="A6XX-A7XX" usage="cmd"/> + <reg32 offset="0x820d" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_gras_su_conservative_ras_cntl" variants="A8XX-" usage="cmd"/> <reg32 offset="0x809a" name="GRAS_SU_PATH_RENDERING_CNTL"> <bitfield name="UNK0" pos="0" type="boolean"/> @@ -1022,10 +1671,16 @@ by a particular renderpass/blit. <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x820e" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A8XX" usage="rp_blit"/> + <reg32 offset="0x820f" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A8XX" usage="rp_blit"/> + <reg32 offset="0x8210" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A8XX" usage="rp_blit"/> + <bitset name="a6xx_rast_cntl" inline="yes"> <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/> </bitset> + <reg32 offset="0x8211" name="GRAS_RAST_CNTL" type="a6xx_rast_cntl" variants="A8XX-" usage="rp_blit"/> + <enum name="a6xx_sequenced_thread_dist"> <value value="0x0" name="DIST_SCREEN_COORD"/> <value value="0x1" name="DIST_ALL_TO_RB0"/> @@ -1073,7 +1728,6 @@ by a particular renderpass/blit. </enum> <bitset name="a6xx_gras_sc_cntl" inline="yes"> - <bitfield name="CCUSINGLECACHELINESIZE" low="0" high="2"/> <bitfield name="SINGLE_PRIM_MODE" low="3" high="4" type="a6xx_single_prim_mode"/> <bitfield name="RASTER_MODE" pos="5" type="a6xx_raster_mode"/> <bitfield name="RASTER_DIRECTION" low="6" high="7" type="a6xx_raster_direction"/> @@ -1084,7 +1738,10 @@ by a particular renderpass/blit. 
<bitfield name="EARLYVIZOUTEN" pos="12" type="boolean"/> </bitset> - <reg32 offset="0x80a0" name="GRAS_SC_CNTL" type="a6xx_gras_sc_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x80a0" name="GRAS_SC_CNTL" type="a6xx_gras_sc_cntl" variants="A6XX-A7XX" usage="rp_blit"> + <bitfield name="CCUSINGLECACHELINESIZE" low="0" high="2" variants="A6XX-A7XX"/> + </reg32> + <reg32 offset="0x8230" name="GRAS_SC_CNTL" type="a6xx_gras_sc_cntl" variants="A8XX-" usage="rp_blit"/> <enum name="a6xx_render_mode"> <value value="0x0" name="RENDERING_PASS"/> @@ -1123,6 +1780,28 @@ by a particular renderpass/blit. <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" type="a6xx_bin_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <!-- Common fields for RB_CNTL and GRAS_SC_BIN_CNTL --> + <bitset name="a8xx_bin_cntl" inline="yes"> + <bitfield name="BINW" low="0" high="9" shr="5" type="uint"/> + <bitfield name="BINH" low="16" high="26" shr="4" type="uint"/> + <bitfield name="RENDER_MODE" low="11" high="13" type="a6xx_render_mode"/> + <doc> + Allows draws that don't have GRAS_LRZ_CNTL.LRZ_WRITE but have + GRAS_LRZ_CNTL.ENABLE to contribute to LRZ during RENDERING pass. + In sysmem mode GRAS_LRZ_CNTL.LRZ_WRITE is not considered. + </doc> + <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="28" high="30" type="a6xx_lrz_feedback_mask"/> + <doc>Disable LRZ feedback writes</doc> + <bitfield name="FORCE_LRZ_WRITE_DIS" pos="31" type="boolean"/> + </bitset> + + <reg32 offset="0x8231" name="GRAS_SC_BIN_CNTL" type="a8xx_bin_cntl" variants="A8XX-" usage="rp_blit"> + <bitfield name="CONS_VIS_IN_BINNING" pos="10" type="boolean"/> + <bitfield name="FORCE_BI_DIR_LRZ_DISABLE" pos="14" type="boolean"/> + <bitfield name="FORCE_LRZ_DIS" pos="15" type="boolean"/> + <bitfield name="BIN_VRS_DIS" pos="27" type="boolean"/> + </reg32> + <bitset name="a6xx_gras_sc_ras_msaa_cntl" inline="yes"> <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/> <bitfield name="UNK2" pos="2"/> @@ -1130,6 +1809,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" type="a6xx_gras_sc_ras_msaa_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8232" name="GRAS_SC_RAS_MSAA_CNTL" type="a6xx_gras_sc_ras_msaa_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_gras_sc_dest_msaa_cntl" inline="yes"> <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/> @@ -1137,6 +1817,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" type="a6xx_gras_sc_dest_msaa_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8233" name="GRAS_SC_DEST_MSAA_CNTL" type="a6xx_gras_sc_dest_msaa_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_msaa_sample_pos_cntl" inline="yes"> <bitfield name="UNK0" pos="0"/> @@ -1158,13 +1839,21 @@ by a particular renderpass/blit. 
<reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8237" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8238" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8239" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x823a" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_2" type="a6xx_programmable_msaa_pos" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x823b" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_3" type="a6xx_programmable_msaa_pos" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x80a7" name="GRAS_ROTATION_CNTL" variants="A7XX" usage="cmd"/> + <reg32 offset="0x8207" name="GRAS_ROTATION_CNTL" variants="A8XX-" usage="cmd"/> <bitset name="a6xx_screen_scissor_cntl" inline="yes"> <bitfield name="SCISSOR_DISABLE" pos="0" type="boolean"/> </bitset> <reg32 offset="0x80af" name="GRAS_SC_SCREEN_SCISSOR_CNTL" type="a6xx_screen_scissor_cntl" variants="A6XX-A7XX" pos="0" usage="cmd"/> + <reg32 offset="0x8234" name="GRAS_SC_SCREEN_SCISSOR_CNTL" type="a6xx_screen_scissor_cntl" variants="A8XX-" pos="0" usage="cmd"/> <bitset name="a6xx_scissor_xy" inline="yes"> <bitfield name="X" low="0" high="15" type="uint"/> @@ -1176,14 +1865,26 @@ by a particular renderpass/blit. <reg32 offset="1" name="BR" type="a6xx_scissor_xy"/> </array> + <array offset="0x8240" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" variants="A8XX-" usage="rp_blit"> + <reg32 offset="0" name="TL" type="a6xx_scissor_xy"/> + <reg32 offset="1" name="BR" type="a6xx_scissor_xy"/> + </array> + <array offset="0x80d0" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit"> <reg32 offset="0" name="TL" type="a6xx_scissor_xy"/> <reg32 offset="1" name="BR" type="a6xx_scissor_xy"/> </array> + <array offset="0x8270" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" variants="A8XX-" usage="rp_blit"> + <reg32 offset="0" name="TL" type="a6xx_scissor_xy"/> + <reg32 offset="1" name="BR" type="a6xx_scissor_xy"/> + </array> <reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8235" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8236" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/> + <enum name="a6xx_fsr_combiner"> <value value="0" name="FSR_COMBINER_OP_KEEP"/> <value value="1" name="FSR_COMBINER_OP_REPLACE"/> @@ -1203,6 +1904,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" type="a6xx_gras_vrs_config" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x8208" name="GRAS_VRS_CONFIG" type="a6xx_gras_vrs_config" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_gras_quality_buffer_info" inline="yes"> <bitfield name="LAYERED" pos="0" type="boolean"/> @@ -1210,6 +1912,7 @@ by a particular renderpass/blit. 
</bitset> <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" type="a6xx_gras_quality_buffer_info" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x808e" name="GRAS_QUALITY_BUFFER_INFO" type="a6xx_gras_quality_buffer_info" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_gras_quality_buffer_dimension" inline="yes"> <bitfield name="WIDTH" low="0" high="15" type="uint"/> @@ -1217,8 +1920,10 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" type="a6xx_gras_quality_buffer_dimension" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x808f" name="GRAS_QUALITY_BUFFER_DIMENSION" type="a6xx_gras_quality_buffer_dimension" variants="A8XX-" usage="rp_blit"/> <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX" type="waddress" usage="rp_blit"/> + <reg64 offset="0x8090" name="GRAS_QUALITY_BUFFER_BASE" variants="A8XX-" type="waddress" usage="rp_blit"/> <bitset name="a6xx_gras_quality_buffer_pitch" inline="yes"> <bitfield name="PITCH" shr="6" low="0" high="7" type="uint"/> @@ -1226,6 +1931,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" type="a6xx_gras_quality_buffer_pitch" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x8092" name="GRAS_QUALITY_BUFFER_PITCH" type="a6xx_gras_quality_buffer_pitch" variants="A8XX-" usage="rp_blit"/> <enum name="a6xx_lrz_dir_status"> <value value="0x1" name="LRZ_DIR_LE"/> @@ -1244,7 +1950,6 @@ by a particular renderpass/blit. - 0.0 if GREATER - 1.0 if LESS </doc> - <bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/> <!-- set when depth-test + depth-write enabled --> <bitfield name="Z_WRITE_ENABLE" pos="4" type="boolean"/> <bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/> @@ -1258,11 +1963,27 @@ by a particular renderpass/blit. Disable LRZ based on previous direction and the current one. If DIR_WRITE is not enabled - there is no write to direction buffer. </doc> - <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/> <bitfield name="Z_FUNC" low="11" high="13" type="adreno_compare_func" variants="A7XX-"/> </bitset> - <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A6XX-A7XX"/> + <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A6XX"> + <bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/> + <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/> + </reg32> + <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A7XX"/> + <reg32 offset="0x8212" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A8XX-"/> + + <reg32 offset="0x8007" name="GRAS_LRZ_CB_CNTL" variants="A7XX" usage="rp_blit"> + <doc> + The total size of the LRZ image array (not including + fast clear buffer), used as a stride for double + buffering used with concurrent binning. + </doc> + <bitfield name="DOUBLE_BUFFER_STRIDE" low="8" high="31" shr="8"/> + </reg32> + <reg32 offset="0x8101" name="GRAS_LRZ_CB_CNTL" usage="rp_blit" variants="A8XX-"> + <bitfield name="DOUBLE_BUFFER_PITCH" low="8" high="31" shr="8"/> + </reg32> <enum name="a6xx_fragcoord_sample_mode"> <value value="0" name="FRAGCOORD_CENTER"/> @@ -1275,14 +1996,17 @@ by a particular renderpass/blit. 
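A brief sketch, under stated assumptions, of the DOUBLE_BUFFER_STRIDE encoding documented above (field at bits 8..31, value stored right-shifted by 8, i.e. the LRZ image-array size in 256-byte units); the function name is hypothetical:

#include <assert.h>
#include <stdint.h>

/* Illustrative only: encode an LRZ double-buffer stride, in bytes, for
 * GRAS_LRZ_CB_CNTL. shr=8 puts the value in 256-byte units, and the field
 * starts at bit 8, so the two shifts cancel for an aligned size.
 */
static inline uint32_t pack_lrz_double_buffer_stride(uint32_t size_bytes)
{
	assert((size_bytes & 0xff) == 0);       /* must be 256-byte aligned */
	return (size_bytes >> 8) << 8;          /* 256-byte units, field at bits 8..31 */
}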
</bitset> <reg32 offset="0x8101" name="GRAS_LRZ_PS_INPUT_CNTL" type="a6xx_gras_lrz_ps_input_cntl" usage="rp_blit" variants="A6XX-A7XX"/> + <reg32 offset="0x8102" name="GRAS_LRZ_PS_INPUT_CNTL" type="a6xx_gras_lrz_ps_input_cntl" usage="rp_blit" variants="A8XX-"/> <bitset name="a6xx_gras_lrz_mrt_buffer_info_0" inline="yes"> <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/> </bitset> <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" type="a6xx_gras_lrz_mrt_buffer_info_0" usage="rp_blit" variants="A6XX-A7XX"/> + <reg32 offset="0x8103" name="GRAS_LRZ_MRT_BUFFER_INFO_0" type="a6xx_gras_lrz_mrt_buffer_info_0" usage="rp_blit" variants="A8XX-"/> <reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit" variants="A6XX-A7XX"/> + <reg64 offset="0x8104" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit" variants="A8XX-"/> <bitset name="a6xx_gras_lrz_buffer_pitch" inline="yes"> <bitfield name="PITCH" low="0" high="7" shr="5" type="uint"/> @@ -1290,6 +2014,9 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" type="a6xx_gras_lrz_buffer_pitch" usage="rp_blit" variants="A6XX-A7XX"/> + <reg32 offset="0x8108" name="GRAS_LRZ_BUFFER_PITCH" type="a6xx_gras_lrz_buffer_pitch" usage="rp_blit" variants="A8XX-"/> + + <reg32 offset="0x810e" name="GRAS_LRZ_BUFFER_STRIDE" usage="rp_blit" low="0" high="16" shr="12" variants="A8XX-"/> <!-- The LRZ "fast clear" buffer is initialized to zero's by blob, and @@ -1346,22 +2073,32 @@ by a particular renderpass/blit. <!-- 0x810c-0x810f invalid --> + <reg32 offset="0x8110" name="GRAS_LRZ_BUFFER_SLICE_PITCH" low="0" high="31" shr="8" type="uint" variants="A8XX-"/> + <reg32 offset="0x8110" name="GRAS_MODE_CNTL" low="0" high="1" variants="A6XX-A7XX" usage="cmd"/> + <reg32 offset="0x8213" name="GRAS_MODE_CNTL" low="0" high="1" variants="A8XX-" usage="cmd"/> <!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR --> <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX"/> + <reg32 offset="0x810d" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A8XX-"/> - <bitset name="a6xx_gras_lrz_depth_buffer_info" inline="yes"> - <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/> - <bitfield name="UNK3" pos="3"/> - </bitset> - - <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" type="a6xx_gras_lrz_depth_buffer_info" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x810f" name="GRAS_LRZ_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A8XX" usage="rp_blit"/> <doc>LUT used to convert quality buffer values to HW shading rate values. 
An array of 4-bit values.</doc> - <array offset="0x8120" name="GRAS_LRZ_QUALITY_LOOKUP_TABLE" variants="A7XX-" stride="1" length="2"/> + <array offset="0x8120" name="GRAS_LRZ_QUALITY_LOOKUP_TABLE" variants="A7XX" stride="1" length="2"/> + <array offset="0x8130" name="GRAS_LRZ_QUALITY_LOOKUP_TABLE" variants="A8XX-" stride="1" length="2"/> - <!-- 0x8112-0x83ff invalid --> + <reg32 offset="0x810c" name="GRAS_LRZ_COLOR_COMP_MASK" variants="A8XX-"> + <bitfield name="MRT0" low="0" high="3"/> + <bitfield name="MRT1" low="4" high="7"/> + <bitfield name="MRT2" low="8" high="11"/> + <bitfield name="MRT3" low="12" high="15"/> + <bitfield name="MRT4" low="16" high="19"/> + <bitfield name="MRT5" low="20" high="23"/> + <bitfield name="MRT6" low="24" high="27"/> + <bitfield name="MRT7" low="28" high="31"/> + </reg32> <enum name="a6xx_rotation"> <value value="0x0" name="ROTATE_0"/> @@ -1372,7 +2109,7 @@ by a particular renderpass/blit. <value value="0x5" name="ROTATE_VFLIP"/> </enum> - <bitset name="a6xx_a2d_bit_cntl" inline="yes"> + <bitset name="a6xx_a2d_blt_cntl" inline="yes"> <bitfield name="ROTATE" low="0" high="2" type="a6xx_rotation"/> <bitfield name="OVERWRITEEN" pos="3" type="boolean"/> <bitfield name="UNK4" low="4" high="6"/> @@ -1391,7 +2128,7 @@ by a particular renderpass/blit. <bitfield name="COPY" pos="30" type="boolean" variants="A7XX-"/> </bitset> - <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_blt_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <!-- note: the low 8 bits for src coords are valid, probably fixed point it would be a bit weird though, since we subtract 1 from BR coords apparently signed, gallium driver uses negative coords and it works? @@ -1408,16 +2145,35 @@ by a particular renderpass/blit. <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x8500" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_blt_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8501" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8502" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8503" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8504" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8505" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8506" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8507" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x8508" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/> + <!-- always 0x880 ? (and 0 in a640/a650 traces?) 
--> - <reg32 offset="0x8600" name="GRAS_DBG_ECO_CNTL" usage="cmd"> + <reg32 offset="0x8600" name="GRAS_DBG_ECO_CNTL" usage="init" variants="A6XX-A7XX"> <bitfield name="UNK7" pos="7" type="boolean"/> <bitfield name="LRZCACHELOCKDIS" pos="11" type="boolean"/> </reg32> - <reg32 offset="0x8601" name="GRAS_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/> - <reg32 offset="0x8602" name="GRAS_NC_MODE_CNTL" variants="A7XX-"/> - <array offset="0x8610" name="GRAS_PERFCTR_TSE_SEL" stride="1" length="4"/> - <array offset="0x8614" name="GRAS_PERFCTR_RAS_SEL" stride="1" length="4"/> - <array offset="0x8618" name="GRAS_PERFCTR_LRZ_SEL" stride="1" length="4"/> + <reg32 offset="0x8600" name="GRAS_TSEFE_DBG_ECO_CNTL" variants="A8XX-"/> + <reg32 offset="0x8702" name="GRAS_DBG_ECO_CNTL" variants="A8XX"/> + <reg32 offset="0x8601" name="GRAS_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" variants="A6XX"/> + <reg32 offset="0x8602" name="GRAS_NC_MODE_CNTL" variants="A7XX"/> + <reg32 offset="0x8700" name="GRAS_NC_MODE_CNTL" variants="A8XX-"/> + <array offset="0x8610" name="GRAS_PERFCTR_TSE_SEL" stride="1" length="4" variants="A6XX-A7XX"/> + <array offset="0x8614" name="GRAS_PERFCTR_RAS_SEL" stride="1" length="4" variants="A6XX-A7XX"/> + <array offset="0x8618" name="GRAS_PERFCTR_LRZ_SEL" stride="1" length="4" variants="A6XX-A7XX"/> + + <array offset="0x8610" name="GRAS_PERFCTR_TSEFE_SEL" stride="1" length="6" variants="A8XX-"/> + <array offset="0x8710" name="GRAS_PERFCTR_TSE_SEL" stride="1" length="6" variants="A8XX-"/> + <array offset="0x8720" name="GRAS_PERFCTR_RAS_SEL" stride="1" length="4" variants="A8XX-"/> + <array offset="0x8730" name="GRAS_PERFCTR_LRZ_SEL" stride="1" length="6" variants="A8XX-"/> + <!-- note 0x8620-0x87ff are not all invalid (in particular, 0x8631/0x8632 have 0x3fff3fff mask and would be xy coords) @@ -1425,6 +2181,7 @@ by a particular renderpass/blit. <!-- same as GRAS_BIN_CONTROL, but without bit 27: --> <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX-A7XX" type="a6xx_bin_cntl" usage="rp_blit"/> + <reg32 offset="0x8800" name="RB_CNTL" variants="A8XX-" type="a8xx_bin_cntl" usage="rp_blit"/> <reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit"> <bitfield name="CCUSINGLECACHELINESIZE" low="3" high="5"/> @@ -1462,7 +2219,8 @@ by a particular renderpass/blit. <reg32 offset="0x8804" name="RB_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/> <reg32 offset="0x8805" name="RB_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/> <reg32 offset="0x8806" name="RB_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/> - <!-- 0x8807-0x8808 invalid --> + <reg32 offset="0x8807" name="RB_PROGRAMMABLE_MSAA_POS_2" type="a6xx_programmable_msaa_pos" usage="rp_blit" variants="A8XX-"/> + <reg32 offset="0x8808" name="RB_PROGRAMMABLE_MSAA_POS_3" type="a6xx_programmable_msaa_pos" usage="rp_blit" variants="A8XX-"/> <!-- note: maybe not actually called RB_RENDER_CONTROLn (since RB_RENDER_CNTL name comes from kernel and is probably right) @@ -1476,7 +2234,7 @@ by a particular renderpass/blit. 
<bitfield name="IJ_LINEAR_CENTROID" pos="4" type="boolean"/> <bitfield name="IJ_LINEAR_SAMPLE" pos="5" type="boolean"/> <bitfield name="COORD_MASK" low="6" high="9" type="hex"/> - <bitfield name="UNK10" pos="10" type="boolean"/> + <bitfield name="INTERP_EN" pos="10" type="boolean"/> </reg32> <reg32 offset="0x880a" name="RB_PS_INPUT_CNTL" usage="rp_blit"> <!-- enable bits for various FS sysvalue regs: --> @@ -1534,8 +2292,32 @@ by a particular renderpass/blit. <reg32 offset="0x8810" name="RB_PS_SAMPLEFREQ_CNTL" usage="rp_blit"> <bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/> </reg32> - <reg32 offset="0x8811" name="RB_UNKNOWN_8811" low="4" high="6" usage="cmd"/> - <reg32 offset="0x8812" name="RB_UNKNOWN_8812" variants="A7XX-" usage="rp_blit"/> + <reg32 offset="0x8811" name="RB_MODE_CNTL" low="4" high="6" usage="cmd"/> + <reg32 offset="0x8812" name="RB_BUFFER_CNTL" variants="A7XX-" usage="rp_blit"> + <bitfield name="Z_SYSMEM" pos="0" type="boolean"/> + <bitfield name="S_SYSMEM" pos="1" type="boolean"/> + <bitfield name="RT0_SYSMEM" pos="2" type="boolean"/> + <bitfield name="RT1_SYSMEM" pos="3" type="boolean"/> + <bitfield name="RT2_SYSMEM" pos="4" type="boolean"/> + <bitfield name="RT3_SYSMEM" pos="5" type="boolean"/> + <bitfield name="RT4_SYSMEM" pos="6" type="boolean"/> + <bitfield name="RT5_SYSMEM" pos="7" type="boolean"/> + <bitfield name="RT6_SYSMEM" pos="8" type="boolean"/> + <bitfield name="RT7_SYSMEM" pos="9" type="boolean"/> + <bitfield name="Z_FULL_IN_GMEM" pos="10" type="boolean" variants="A8XX-"/> + <bitfield name="S_FULL_IN_GMEM" pos="11" type="boolean" variants="A8XX-"/> + <bitfield name="RT0_FULL_IN_GMEM" pos="12" type="boolean" variants="A8XX-"/> + <bitfield name="RT1_FULL_IN_GMEM" pos="13" type="boolean" variants="A8XX-"/> + <bitfield name="RT2_FULL_IN_GMEM" pos="14" type="boolean" variants="A8XX-"/> + <bitfield name="RT3_FULL_IN_GMEM" pos="15" type="boolean" variants="A8XX-"/> + <bitfield name="RT4_FULL_IN_GMEM" pos="16" type="boolean" variants="A8XX-"/> + <bitfield name="RT5_FULL_IN_GMEM" pos="17" type="boolean" variants="A8XX-"/> + <bitfield name="RT6_FULL_IN_GMEM" pos="18" type="boolean" variants="A8XX-"/> + <bitfield name="RT7_FULL_IN_GMEM" pos="19" type="boolean" variants="A8XX-"/> + </reg32> + + <reg32 offset="0x8816" name="RB_RESOLVE_CR_CNTL" variants="A8XX-" usage="rp_blit"/> + <!-- 0x8813-0x8817 invalid --> <!-- always 0x0 ? --> <reg32 offset="0x8818" name="RB_UNKNOWN_8818" low="0" high="6" usage="cmd"/> @@ -1546,11 +2328,17 @@ by a particular renderpass/blit. 
<reg32 offset="0x881c" name="RB_UNKNOWN_881C" usage="cmd"/> <reg32 offset="0x881d" name="RB_UNKNOWN_881D" usage="cmd"/> <reg32 offset="0x881e" name="RB_UNKNOWN_881E" usage="cmd"/> - <!-- 0x881f invalid --> + + <!-- Duplicates fields from SP_PS_CNTL_0 --> + <reg32 offset="0x881f" name="RB_PS_CNTL" variants="A8XX-" usage="rp_blit"> + <bitfield name="PIXLODENABLE" pos="0" type="boolean"/> + <bitfield name="LODPIXMASK" pos="1" type="boolean"/> + </reg32> + <array offset="0x8820" name="RB_MRT" stride="8" length="8" usage="rp_blit"> <reg32 offset="0x0" name="CONTROL"> - <bitfield name="BLEND" pos="0" type="boolean"/> - <bitfield name="BLEND2" pos="1" type="boolean"/> + <bitfield name="COLOR_BLEND_EN" pos="0" type="boolean"/> + <bitfield name="ALPHA_BLEND_EN" pos="1" type="boolean"/> <bitfield name="ROP_ENABLE" pos="2" type="boolean"/> <bitfield name="ROP_CODE" low="3" high="6" type="a3xx_rop_code"/> <bitfield name="COMPONENT_ENABLE" low="7" high="10" type="hex"/> @@ -1613,7 +2401,9 @@ by a particular renderpass/blit. <bitfield name="ALPHA_TO_ONE" pos="11" type="boolean"/> <bitfield name="SAMPLE_MASK" low="16" high="31"/> </reg32> - <!-- 0x8866-0x886f invalid --> + <reg32 offset="0x8866" name="RB_LB_PARAM_LIMIT" variants="A8XX-" usage="rp_blit"> + <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/> + </reg32> <reg32 offset="0x8870" name="RB_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" usage="rp_blit"/> <reg32 offset="0x8871" name="RB_DEPTH_CNTL" usage="rp_blit"> @@ -1627,14 +2417,15 @@ by a particular renderpass/blit. </doc> <bitfield name="Z_READ_ENABLE" pos="6" type="boolean"/> <bitfield name="Z_BOUNDS_ENABLE" pos="7" type="boolean"/> + <!-- clamp shader depth out to [0, 1] (instead of RB_VIEWPORT_ZCLAMP_MIN/MAX)--> + <bitfield name="O_DEPTH_01_CLAMP_EN" pos="8" type="boolean" variants="A8XX-"/> </reg32> <!-- duplicates GRAS_SU_DEPTH_BUFFER_INFO: --> <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A6XX" type="a6xx_depth_buffer_info" usage="rp_blit"/> <!-- first 4 bits duplicates GRAS_SU_DEPTH_BUFFER_INFO --> - <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit"> - <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/> - <bitfield name="UNK3" low="3" high="4"/> + <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A7XX-" usage="rp_blit"> + <bitfield name="PRT" low="3" high="4"/> <bitfield name="TILEMODE" low="5" high="6" type="a6xx_tile_mode"/> <bitfield name="LOSSLESSCOMPEN" pos="7" type="boolean"/> </reg32> @@ -1702,12 +2493,22 @@ by a particular renderpass/blit. 
<reg32 offset="0x8898" name="RB_LRZ_CNTL" usage="rp_blit"> <bitfield name="ENABLE" pos="0" type="boolean"/> </reg32> - <reg32 offset="0x8899" name="RB_UNKNOWN_8899" variants="A7XX-" usage="cmd"/> + <reg32 offset="0x8899" name="RB_LRZ_CNTL2" variants="A7XX-" usage="cmd"> + <bitfield name="ENABLE_BIDIRECTIONAL_LRZ" pos="0" type="boolean"/> + </reg32> <!-- 0x8899-0x88bf invalid --> <!-- clamps depth value for depth test/write --> <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit" variants="A6XX-A7XX"/> <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit" variants="A6XX-A7XX"/> +<!-- todo allow type="float" on an <array/> --> + <array offset="0x88b0" name="RB_VIEWPORT_ZCLAMP_MIN" stride="1" length="16" usage="rp_blit" variants="A8XX-"> + <reg32 offset="0" name="REG" type="float"/> + </array> + <array offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MAX" stride="1" length="16" usage="rp_blit" variants="A8XX-"> + <reg32 offset="0" name="REG" type="float"/> + </array> + <!-- 0x88c2-0x88cf invalid--> <reg32 offset="0x88d0" name="RB_RESOLVE_CNTL_0" usage="rp_blit"> <bitfield name="UNK0" low="0" high="12"/> @@ -1720,6 +2521,11 @@ by a particular renderpass/blit. <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/> <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/> </reg32> + + <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" type="a8xx_bin_size" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x88f0" name="RB_RESOLVE_CNTL_4" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x88f1" name="RB_RESOLVE_CNTL_5" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x88d4" name="RB_RESOLVE_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/> <reg32 offset="0x88d5" name="RB_RESOLVE_GMEM_BUFFER_INFO" usage="rp_blit"> <bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/> @@ -1798,8 +2604,10 @@ by a particular renderpass/blit. <value value="0x1" name="CCU_CACHE_SIZE_HALF"/> <value value="0x2" name="CCU_CACHE_SIZE_QUARTER"/> <value value="0x3" name="CCU_CACHE_SIZE_EIGHTH"/> + <!-- for DEPTH_CACHE_SIZE 3 == THREE_QUARTER from KNP --> + <value value="0x3" name="CCU_CACHE_SIZE_THREE_QUARTER"/> </enum> - <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A7XX-" usage="cmd"> + <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A7XX" usage="cmd"> <bitfield name="DEPTH_OFFSET_HI" pos="0" type="hex"/> <bitfield name="COLOR_OFFSET_HI" pos="2" type="hex"/> <bitfield name="DEPTH_CACHE_SIZE" low="10" high="11" type="a6xx_ccu_cache_size"/> @@ -1814,12 +2622,30 @@ by a particular renderpass/blit. --> <bitfield name="COLOR_OFFSET" low="23" high="31" shr="12" type="hex"/> </reg32> - <!-- 0x88e6-0x88ef invalid --> + + <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A8XX-" usage="cmd"> + <!-- + For color cache, full is 128KB per CCU. For depth cache, + full is 256KB per CCU. + + For attr/pos caches (see VPC_{ATTR,POS,BV_POS}_BUF_GMEM_SIZE), + the sizes are per CCU + --> + <bitfield name="COLOR_OFFSET" low="0" high="13" shr="12" type="hex"/> + <bitfield name="COLOR_CACHE_SIZE" low="14" high="15" type="a6xx_ccu_cache_size"/> + <bitfield name="DEPTH_OFFSET" low="16" high="29" shr="12" type="hex"/> + <bitfield name="DEPTH_CACHE_SIZE" low="30" high="31" type="a6xx_ccu_cache_size"/> + </reg32> + + <reg32 offset="0x88e6" name="RB_RESOLVE_GMEM_BUFFER_CNTL" variants="A8XX-"> + <bitfield name="FULL_IN_GMEM" pos="0" type="boolean"/> + </reg32> + <!-- always 0x0 ? 
--> - <reg32 offset="0x88f0" name="RB_UNKNOWN_88F0" low="0" high="11" usage="cmd"/> + <reg32 offset="0x88f0" name="RB_UNKNOWN_88F0" low="0" high="11" variants="A6XX" usage="cmd"/> <!-- could be for separate stencil? (or may not be a flag buffer at all) --> - <reg64 offset="0x88f1" name="RB_UNK_FLAG_BUFFER_BASE" type="waddress" align="64"/> - <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch"/> + <reg64 offset="0x88f1" name="RB_UNK_FLAG_BUFFER_BASE" type="waddress" align="64" variants="A6XX"/> + <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch" variants="A6XX"/> <reg32 offset="0x88f4" name="RB_VRS_CONFIG" usage="rp_blit"> <bitfield name="UNK2" pos="2" type="boolean"/> @@ -1849,9 +2675,19 @@ by a particular renderpass/blit. the address is specified through CP_EVENT_WRITE7::WRITE_SAMPLE_COUNT. </doc> <reg64 offset="0x8927" name="RB_SAMPLE_COUNTER_BASE" type="waddress" align="16" usage="cmd"/> - <!-- 0x8929-0x89ff invalid --> - <!-- TODO: there are some registers in the 0x8a00-0x8bff range --> + <bitset name="a8xx_gmem_dimension" inline="yes"> + <bitfield name="WIDTH" low="0" high="14" type="uint"/> + <bitfield name="HEIGHT" low="16" high="30" type="uint"/> + </bitset> + + <reg32 offset="0x8813" name="RB_DEPTH_GMEM_DIMENSION" type="a8xx_gmem_dimension" variants="A8XX-"/> + <reg32 offset="0x8814" name="RB_STENCIL_GMEM_DIMENSION" type="a8xx_gmem_dimension" variants="A8XX-"/> + <reg32 offset="0x8815" name="RB_RESOLVE_GMEM_DIMENSION" type="a8xx_gmem_dimension" variants="A8XX-"/> + + <array offset="0x8930" name="RB_MRT_GMEM_DIMENSION" variants="A8XX-" stride="1" length="8"> + <reg32 offset="0" name="REG" type="a8xx_gmem_dimension"/> + </array> <!-- These show up in a6xx gen3+ but so far haven't found an example of @@ -1862,7 +2698,7 @@ by a particular renderpass/blit. <reg32 offset="0x8a20" name="RB_UNKNOWN_8A20" variants="A6XX" usage="rp_blit"/> <reg32 offset="0x8a30" name="RB_UNKNOWN_8A30" variants="A6XX" usage="rp_blit"/> - <reg32 offset="0x8c00" name="RB_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/> + <reg32 offset="0x8c00" name="RB_A2D_BLT_CNTL" type="a6xx_a2d_blt_cntl" usage="rp_blit"/> <reg32 offset="0x8c01" name="RB_A2D_PIXEL_CNTL" low="0" high="31" usage="rp_blit"/> <bitset name="a6xx_a2d_src_texture_info" inline="yes"> @@ -1921,10 +2757,10 @@ by a particular renderpass/blit. <!-- 0x8c35-0x8dff invalid --> <!-- always 0x1 ? either doesn't exist for a650 or write-only: --> - <reg32 offset="0x8e01" name="RB_UNKNOWN_8E01" usage="cmd"/> + <reg32 offset="0x8e01" name="RB_RBP_CNTL" usage="cmd"/> <!-- 0x8e00-0x8e03 invalid --> <reg32 offset="0x8e04" name="RB_DBG_ECO_CNTL" usage="cmd"/> <!-- TODO: valid mask 0xfffffeff --> - <reg32 offset="0x8e05" name="RB_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/> + <reg32 offset="0x8e05" name="RB_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" variants="A6XX"/> <!-- 0x02080000 in GMEM, zero otherwise? --> <reg32 offset="0x8e06" name="RB_CCU_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/> @@ -1963,7 +2799,7 @@ by a particular renderpass/blit. 
<bitfield name="CONCURRENT_UNRESOLVE_MODE" low="5" high="6" type="a7xx_concurrent_unresolve_mode"/> <!-- rest of the bits were moved to RB_CCU_CACHE_CNTL --> </reg32> - <reg32 offset="0x8e08" name="RB_NC_MODE_CNTL"> + <reg32 offset="0x8e08" name="RB_NC_MODE_CNTL" variants="A6XX-A7XX"> <bitfield name="MODE" pos="0" type="boolean"/> <bitfield name="LOWER_BIT" low="1" high="2" type="uint"/> <bitfield name="MIN_ACCESS_LENGTH" pos="3" type="boolean"/> <!-- true=64b false=32b --> @@ -1972,26 +2808,40 @@ by a particular renderpass/blit. <bitfield name="RGB565_PREDICATOR" pos="11" type="boolean"/> <bitfield name="UNK12" low="12" high="13"/> </reg32> - <reg32 offset="0x8e09" name="RB_UNKNOWN_8E09" variants="A7XX-" usage="cmd"/> + <reg32 offset="0x8e08" name="RB_CCU_NC_MODE_CNTL" variants="A8XX-"/> + + <reg32 offset="0x8e09" name="RB_UNKNOWN_8E09" variants="A7XX" usage="cmd"/> + <reg32 offset="0x8e09" name="RB_GC_GMEM_PROTECT" variants="A8XX-"/> + <reg32 offset="0x8e0a" name="RB_LPAC_GMEM_PROTECT" variants="A8XX-"/> <!-- 0x8e09-0x8e0f invalid --> <array offset="0x8e10" name="RB_PERFCTR_RB_SEL" stride="1" length="8"/> <array offset="0x8e18" name="RB_PERFCTR_CCU_SEL" stride="1" length="5"/> <!-- 0x8e1d-0x8e1f invalid --> <!-- 0x8e20-0x8e25 more perfcntr sel? --> <!-- 0x8e26-0x8e27 invalid --> - <reg32 offset="0x8e28" name="RB_CMP_DBG_ECO_CNTL"/> + + <reg32 offset="0x8f00" name="RB_CMP_NC_MODE_CNTL" variants="A8XX-"/> + <reg32 offset="0x8f01" name="RB_RESOLVE_PREFETCH_CNTL" variants="A8XX-"/> + <reg32 offset="0x8f02" name="RB_CMP_DBG_ECO_CNTL" variants="A8XX-"/> + + <reg32 offset="0x8f03" name="RB_UNSLICE_STATUS" variants="A8XX-"/> + <reg32 offset="0x8e28" name="RB_CMP_DBG_ECO_CNTL" variants="A6XX-A7XX"/> <!-- 0x8e29-0x8e2b invalid --> - <array offset="0x8e2c" name="RB_PERFCTR_CMP_SEL" stride="1" length="4"/> - <array offset="0x8e30" name="RB_PERFCTR_UFC_SEL" stride="1" length="6" variants="A7XX-"/> - <reg32 offset="0x8e3b" name="RB_RB_SUB_BLOCK_SEL_CNTL_HOST"/> - <reg32 offset="0x8e3d" name="RB_RB_SUB_BLOCK_SEL_CNTL_CD"/> + <array offset="0x8e2c" name="RB_PERFCTR_CMP_SEL" stride="1" length="4" variants="A6XX-A7XX"/> + <array offset="0x8e30" name="RB_PERFCTR_UFC_SEL" stride="1" length="6" variants="A7XX"/> + <array offset="0x8f04" name="RB_PERFCTR_CMP_SEL" stride="1" length="4" variants="A8XX-"/> + <array offset="0x8f10" name="RB_PERFCTR_UFC_SEL" stride="1" length="6" variants="A8XX-"/> + <reg32 offset="0x8e3b" name="RB_SUB_BLOCK_SEL_CNTL_HOST"/> + <reg32 offset="0x8e3d" name="RB_SUB_BLOCK_SEL_CNTL_CD"/> + <reg32 offset="0x8f29" name="RB_UFC_DBG_CNTL" variants="A8XX-"/> <!-- 0x8e3e-0x8e4f invalid --> <!-- GMEM save/restore for preemption: --> <reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE" pos="0" type="boolean"/> <!-- address for GMEM save/restore? --> <reg32 offset="0x8e51" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ADDR" type="waddress" align="1"/> - <!-- 0x8e53-0x8e7f invalid --> - <reg32 offset="0x8e79" name="RB_UNKNOWN_8E79" variants="A7XX-" usage="cmd"/> + <reg32 offset="0x8e77" name="RB_SLICE_UFC_PREFETCH_CNTL" variants="A8XX-"/> + <reg32 offset="0x8e78" name="RB_SLICE_UFC_DBG_CNTL" variants="A8XX-"/> + <reg32 offset="0x8e79" name="RB_UNKNOWN_8E79" variants="A7XX" usage="init"/> <!-- 0x8e80-0x8e83 are valid --> <!-- 0x8e84-0x90ff invalid --> @@ -2014,6 +2864,10 @@ by a particular renderpass/blit. 
<reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9307" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A8XX" usage="rp_blit"/> + <reg32 offset="0x9308" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A8XX" usage="rp_blit"/> + <reg32 offset="0x9309" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A8XX" usage="rp_blit"/> + <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/> @@ -2028,6 +2882,9 @@ by a particular renderpass/blit. <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x930a" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x930b" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x930c" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/> @@ -2042,6 +2899,7 @@ by a particular renderpass/blit. <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A6XX" usage="rp_blit"/> <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x930d" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" type="a6xx_vpc_rast_stream_cntl" variants="A7XX" usage="rp_blit"/> <reg32 offset="0x9107" name="VPC_UNKNOWN_9107" variants="A6XX" usage="rp_blit"> @@ -2051,6 +2909,7 @@ by a particular renderpass/blit. </reg32> <reg32 offset="0x9108" name="VPC_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x930e" name="VPC_RAST_CNTL" type="a6xx_rast_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_pc_cntl" inline="yes"> <bitfield name="PRIMITIVE_RESTART" pos="0" type="boolean"/> <bitfield name="PROVOKING_VTX_LAST" pos="1" type="boolean"/> @@ -2091,9 +2950,13 @@ by a particular renderpass/blit. 
</bitset> <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x930f" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x90c0" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x90c1" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x931a" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A8XX-" usage="rp_blit"/> <enum name="a6xx_varying_interp_mode"> <value value="0" name="INTERP_SMOOTH"/> @@ -2119,6 +2982,15 @@ by a particular renderpass/blit. <reg32 offset="0x0" name="MODE"/> </array> + <array offset="0x9240" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" variants="A8XX-" usage="rp_blit"> + <doc>Packed array of a6xx_varying_interp_mode</doc> + <reg32 offset="0x0" name="MODE"/> + </array> + <array offset="0x9248" name="VPC_VARYING_REPLACE_MODE" stride="1" length="8" variants="A8XX-" usage="rp_blit"> + <doc>Packed array of a6xx_varying_ps_repl_mode</doc> + <reg32 offset="0x0" name="MODE"/> + </array> + <!-- always 0x0 --> <reg32 offset="0x9210" name="VPC_UNKNOWN_9210" low="0" high="31" variants="A6XX" usage="cmd"/> <reg32 offset="0x9211" name="VPC_UNKNOWN_9211" low="0" high="31" variants="A6XX" usage="cmd"/> @@ -2128,6 +3000,11 @@ by a particular renderpass/blit. <reg32 offset="0" name="DISABLE"/> </array> + <array offset="0x9252" name="VPC_VARYING_LM_TRANSFER_CNTL" stride="1" length="4" variants="A8XX-" usage="rp_blit"> + <!-- one bit per varying component: --> + <reg32 offset="0" name="DISABLE"/> + </array> + <bitset name="a6xx_vpc_so_mapping_wptr" inline="yes"> <!-- Choose which DWORD to write to. There is an array of @@ -2158,6 +3035,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" type="a6xx_vpc_so_mapping_wptr" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9180" name="VPC_SO_MAPPING_WPTR" type="a6xx_vpc_so_mapping_wptr" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_vpc_so_mapping_port" inline="yes"> <bitfield name="A_BUF" low="0" high="1" type="uint"/> @@ -2170,8 +3048,10 @@ by a particular renderpass/blit. <!-- special register, write multiple times to load SO program (not readable) --> <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" type="a6xx_vpc_so_mapping_port" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9181" name="VPC_SO_MAPPING_PORT" type="a6xx_vpc_so_mapping_port" variants="A8XX-" usage="rp_blit"/> <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" variants="A6XX-A7XX" usage="cmd"/> + <reg64 offset="0x9182" name="VPC_SO_QUERY_BASE" type="waddress" align="32" variants="A8XX-" usage="cmd"/> <array offset="0x921a" name="VPC_SO" stride="7" length="4" variants="A6XX-A7XX" usage="cmd"> <reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/> @@ -2181,13 +3061,23 @@ by a particular renderpass/blit. 
<reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/> </array> + <array offset="0x9184" name="VPC_SO" stride="7" length="4" variants="A8XX-" usage="cmd"> + <reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/> + <reg32 offset="2" name="BUFFER_SIZE" low="2" high="31" shr="2"/> + <reg32 offset="3" name="BUFFER_STRIDE" low="0" high="9" shr="2"/> + <reg32 offset="4" name="BUFFER_OFFSET" low="2" high="31" shr="2"/> + <reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/> + </array> + <bitset name="a6xx_vpc_replace_mode_cntl" inline="yes"> <bitfield name="INVERT" pos="0" type="boolean"/> </bitset> <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" type="a6xx_vpc_replace_mode_cntl" variants="A6XX-A7XX" usage="cmd"/> + <reg32 offset="0x9310" name="VPC_REPLACE_MODE_CNTL" type="a6xx_vpc_replace_mode_cntl" variants="A8XX-" usage="cmd"/> <reg32 offset="0x9300" name="VPC_ROTATION_CNTL" low="0" high="2" variants="A6XX-A7XX" usage="cmd"/> + <reg32 offset="0x9312" name="VPC_ROTATION_CNTL" low="0" high="2" variants="A8XX-" usage="cmd"/> <bitset name="a6xx_vpc_xs_cntl" inline="yes"> <doc> @@ -2211,6 +3101,10 @@ by a particular renderpass/blit. <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9300" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x9301" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x9302" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" variants="A8XX-" usage="rp_blit"/> + <bitset name="a6xx_vpc_ps_cntl" inline="yes"> <bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/> <!-- for fixed-function (i.e. no GS) gl_PrimitiveID in FS --> @@ -2231,6 +3125,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x9304" name="VPC_PS_CNTL" type="a6xx_vpc_ps_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9303" name="VPC_PS_CNTL" type="a6xx_vpc_ps_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_vpc_so_cntl" inline="yes"> <!-- @@ -2244,40 +3139,73 @@ by a particular renderpass/blit. 
</bitset> <reg32 offset="0x9305" name="VPC_SO_CNTL" type="a6xx_vpc_so_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9304" name="VPC_SO_CNTL" type="a6xx_vpc_so_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_so_override" inline="yes"> <bitfield name="DISABLE" pos="0" type="boolean"/> </bitset> <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" type="a6xx_so_override" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9305" name="VPC_SO_OVERRIDE" type="a6xx_so_override" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x9807" name="PC_DGEN_SO_OVERRIDE" type="a6xx_so_override" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x9b0a" name="PC_DGEN_SO_OVERRIDE" type="a6xx_so_override" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9306" name="VPC_PS_RAST_CNTL" type="a6xx_rast_cntl" variants="A8XX-" usage="rp_blit"/> - <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="rp_blit"/> - <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX" type="uint" usage="rp_blit"/> + <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="cmd"/> + <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX" type="hex" usage="cmd"/> - <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="rp_blit"/> + <reg32 offset="0x9314" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/> + <reg32 offset="0x9315" name="VPC_ATTR_BUF_GMEM_BASE" variants="A8XX-" type="hex" usage="cmd"/> + + <reg32 offset="0x9316" name="VPC_POS_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/> + <reg32 offset="0x9317" name="VPC_POS_BUF_GMEM_BASE" variants="A8XX-" type="hex" usage="cmd"/> + + <reg32 offset="0x9318" name="VPC_BV_POS_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/> + <reg32 offset="0x9319" name="VPC_BV_POS_BUF_GMEM_BASE" variants="A8XX-" type="hex" usage="cmd"/> + + <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="cmd"/> + <reg32 offset="0x9b16" name="PC_ATTR_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/> + + <reg32 offset="0x9b17" name="PC_POS_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/> + <reg32 offset="0x9b18" name="PC_BV_POS_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/> <reg32 offset="0x930a" name="VPC_UNKNOWN_930A" variants="A7XX"/> + <reg32 offset="0x9313" name="VPC_UNKNOWN_9313" variants="A8XX-"/> + <reg32 offset="0x9e17" name="PC_UNKNOWN_9E17" variants="A8XX-"/> + <reg32 offset="0x960a" name="VPC_FLATSHADE_MODE_CNTL" variants="A7XX"/> + <reg32 offset="0x9741" name="VPC_FLATSHADE_MODE_CNTL" variants="A8XX-"/> <!-- 0x9307-0x95ff invalid --> <!-- TODO: 0x9600-0x97ff range --> - <reg32 offset="0x9600" name="VPC_DBG_ECO_CNTL" usage="cmd"/> <!-- always 0x0 ? TODO: 0x1fbf37ff valid mask --> - <reg32 offset="0x9601" name="VPC_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" usage="cmd"/> - <reg32 offset="0x9602" name="VPC_UNKNOWN_9602" pos="0" usage="cmd"/> <!-- always 0x0 ? --> - <reg32 offset="0x9603" name="VPC_UNKNOWN_9603" low="0" high="26"/> + <reg32 offset="0x9600" name="VPC_DBG_ECO_CNTL" variants="A6XX-A7XX" usage="cmd"/> <!-- always 0x0 ? 
TODO: 0x1fbf37ff valid mask --> + <reg32 offset="0x9601" name="VPC_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" usage="cmd" variants="A6XX"/> + <reg32 offset="0x9680" name="VPC_DBG_ECO_CNTL" variants="A8XX-"/> + <reg32 offset="0x9604" name="VPC_DBG_ECO_CNTL_2" variants="A8XX-"/> + <reg32 offset="0x9742" name="VPC_DBG_ECO_CNTL_1" variants="A8XX-"/> + <reg32 offset="0x9745" name="VPC_DBG_ECO_CNTL_3" variants="A8XX-"/> + <reg32 offset="0x9602" name="VPC_LB_MODE_CNTL" pos="0" variants="A6XX-A7XX" usage="init"/> <!-- always 0x0 ? --> + <reg32 offset="0x9740" name="VPC_LB_MODE_CNTL" pos="0" variants="A8XX-"/> + <reg32 offset="0x9603" name="VPC_STATUS" low="0" high="26" variants="A6XX-A7XX"/> + <reg32 offset="0x9600" name="VPC_STATUS" low="0" high="26" variants="A8XX-"/> <array offset="0x9604" name="VPC_PERFCTR_VPC_SEL" stride="1" length="6" variants="A6XX"/> - <array offset="0x960b" name="VPC_PERFCTR_VPC_SEL" stride="1" length="12" variants="A7XX-"/> - <!-- 0x960a-0x9623 invalid --> - <!-- TODO: regs from 0x9624-0x963a --> - <!-- 0x963b-0x97ff invalid --> + <array offset="0x960b" name="VPC_PERFCTR_VPC_SEL" stride="1" length="12" variants="A7XX"/> + <array offset="0x9670" name="VPC_PERFCTR_VPC_SEL_2" stride="1" length="12" variants="A8XX-"/> + <array offset="0x9690" name="VPC_PERFCTR_VPC_SEL" stride="1" length="12" variants="A8XX-"/> + <array offset="0x9750" name="VPC_PERFCTR_VPC_SEL_1" stride="1" length="12" variants="A8XX-"/> + + <reg64 offset="0x9634" name="VPC_CONTEXT_SWITCH_SO_SAVE_ADDR" type="waddress" variants="A6XX-A7XX"/> + <reg64 offset="0x9602" name="VPC_CONTEXT_SWITCH_SO_SAVE_ADDR" type="waddress" variants="A8XX-"/> + + <reg32 offset="0x980b" name="PC_UNKNOWN_980B" variants="A8XX-"/> <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b10" name="PC_HS_PARAM_0" low="0" high="5" type="uint" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_pc_hs_param_1" inline="yes"> <bitfield name="SIZE" low="0" high="10" type="uint"/> @@ -2285,6 +3213,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x9801" name="PC_HS_PARAM_1" type="a6xx_pc_hs_param_1" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b11" name="PC_HS_PARAM_1" type="a6xx_pc_hs_param_1" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_pc_ds_param" inline="yes"> <bitfield name="SPACING" low="0" high="1" type="a6xx_tess_spacing"/> @@ -2292,10 +3221,13 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x9802" name="PC_DS_PARAM" type="a6xx_pc_ds_param" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b12" name="PC_DS_PARAM" type="a6xx_pc_ds_param" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x9803" name="PC_RESTART_INDEX" low="0" high="31" type="uint" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b15" name="PC_RESTART_INDEX" low="0" high="31" type="uint" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x9804" name="PC_MODE_CNTL" low="0" high="7" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b00" name="PC_MODE_CNTL" low="0" high="14" variants="A8XX" usage="rp_blit"/> <reg32 offset="0x9805" name="PC_POWER_CNTL" low="0" high="2" usage="rp_blit"/> @@ -2304,6 +3236,7 @@ by a particular renderpass/blit. 
</bitset> <reg32 offset="0x9806" name="PC_PS_CNTL" type="a6xx_pc_ps_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b06" name="PC_PS_CNTL" type="a6xx_pc_ps_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_pc_dgen_so_cntl" inline="yes"> <bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/> @@ -2311,12 +3244,19 @@ by a particular renderpass/blit. <!-- New in a6xx gen3+ --> <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" type="a6xx_pc_dgen_so_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b0b" name="PC_DGEN_SO_CNTL" type="a6xx_pc_dgen_so_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_pc_dgen_su_conservative_ras_cntl" inline="yes"> <bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/> </bitset> <reg32 offset="0x980a" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_pc_dgen_su_conservative_ras_cntl" variants="A6XX-A7XX"/> + <reg32 offset="0x9b08" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_pc_dgen_su_conservative_ras_cntl" variants="A8XX-"/> + + <reg32 offset="0x9b0c" name="PC_VS_INPUT_CNTL" variants="A8XX-" usage="rp_blit"> + <bitfield name="INSTR_CNT" low="0" high="5" type="uint"/> + <bitfield name="SIDEBAND_CNT" low="6" high="8" type="uint"/> + </reg32> <!-- 0x9840 - 0x9842 are not readable --> <bitset name="a6xx_draw_initiator" inline="yes"> @@ -2326,6 +3266,9 @@ by a particular renderpass/blit. <reg32 offset="0x9840" name="PC_DRAW_INITIATOR" type="a6xx_draw_initiator" variants="A6XX-A7XX"/> <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR" type="a6xx_draw_initiator" variants="A6XX-A7XX"/> + <reg32 offset="0x9800" name="PC_DRAW_INITIATOR" type="a6xx_draw_initiator" variants="A8XX-"/> + <reg32 offset="0x9801" name="PC_KERNEL_INITIATOR" type="a6xx_draw_initiator" variants="A8XX-"/> + <bitset name="a6xx_event_initiator" inline="yes"> <!-- I think only the low bit is actually used? --> <bitfield name="STATE_ID" low="16" high="23"/> @@ -2333,6 +3276,7 @@ by a particular renderpass/blit. </bitset> <reg32 offset="0x9842" name="PC_EVENT_INITIATOR" type="a6xx_event_initiator" variants="A6XX-A7XX"/> + <reg32 offset="0x9802" name="PC_EVENT_INITIATOR" type="a6xx_event_initiator" variants="A8XX-"/> <!-- 0x9880 written in a lot of places by SQE, same value gets written @@ -2345,19 +3289,23 @@ by a particular renderpass/blit. <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX" usage="rp_blit"/> <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0x9812" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A8XX" usage="rp_blit"/> <!-- Both are a750+. Probably needed to correctly overlap execution of several draws. --> <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX" usage="cmd"/> + <reg32 offset="0x9814" name="PC_HS_BUFFER_SIZE" variants="A8XX-" usage="cmd"/> <!-- Blob adds a bit more space {0x10, 0x20, 0x30, 0x40} bytes, but the meaning of this additional space is not known. --> <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX" usage="cmd"/> + <reg32 offset="0x9815" name="PC_TF_BUFFER_SIZE" variants="A8XX-" usage="cmd"/> <!-- 0x9982-0x9aff invalid --> <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b01" name="PC_CNTL" type="a6xx_pc_cntl" variants="A8XX-" usage="rp_blit"/> <bitset name="a6xx_pc_xs_cntl" inline="yes"> <doc> @@ -2381,7 +3329,13 @@ by a particular renderpass/blit. 
<reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/> <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b02" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x9b03" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x9b04" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x9b05" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b13" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A8XX-" usage="rp_blit"/> <reg32 offset="0x9b06" name="PC_PRIMITIVE_CNTL_6" variants="A6XX" usage="rp_blit"> <doc> @@ -2391,24 +3345,37 @@ by a particular renderpass/blit. </reg32> <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b09" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A8XX-" usage="rp_blit"/> <!-- mask of enabled views, doesn't exist on A630 --> <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0x9b0d" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A8XX-" usage="rp_blit"/> <!-- 0x9b09-0x9bff invalid --> <reg32 offset="0x9c00" name="PC_2D_EVENT_CMD"> <!-- special register (but note first 8 bits can be written/read) --> <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/> <bitfield name="STATE_ID" low="8" high="15"/> </reg32> - <!-- 0x9c01-0x9dff invalid --> - <!-- TODO: 0x9e00-0xa000 range incomplete --> - <reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL"/> - <reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/> + + <reg32 offset="0x9e50" name="PC_CHICKEN_BITS_1" variants="A8XX-"/> + <reg32 offset="0x9f20" name="PC_CHICKEN_BITS_2" variants="A8XX-"/> + <reg32 offset="0x9e22" name="PC_CHICKEN_BITS_3" variants="A8XX-"/> + <reg32 offset="0x9e23" name="PC_CHICKEN_BITS_4" variants="A8XX-"/> + <reg32 offset="0x9f23" name="PC_CHICKEN_BITS_5" variants="A8XX-"/> + + <reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL" variants="A6XX-A7XX"/> + <reg32 offset="0x9e53" name="PC_DBG_ECO_CNTL" variants="A8XX-"/> + <reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/> <reg64 offset="0x9e04" name="PC_DMA_BASE" type="address" variants="A6XX-A7XX"/> <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint" variants="A6XX-A7XX"/> <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint" variants="A6XX-A7XX"/> + <reg64 offset="0x9e06" name="PC_DMA_BASE" type="address" variants="A8XX-"/> + <reg32 offset="0x9e08" name="PC_DMA_OFFSET" type="uint" variants="A8XX-"/> + <reg32 offset="0x9e09" name="PC_DMA_SIZE" type="uint" variants="A8XX-"/> + <reg64 offset="0x9e08" name="PC_TESS_BASE" variants="A6XX" type="waddress" align="32" usage="cmd"/> <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX" type="waddress" align="32" usage="cmd"/> + <reg64 offset="0x9816" name="PC_TESS_BASE" variants="A8XX-" type="waddress" align="32" usage="cmd"/> <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx" variants="A6XX-A7XX"> <doc> @@ -2419,6 +3386,10 @@ by a particular renderpass/blit. 
<reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint" variants="A6XX-A7XX"/> <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint" variants="A6XX-A7XX"/> + <reg32 offset="0x9e00" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx" variants="A8XX-"/> + <reg32 offset="0x9e01" name="PC_DRAWCALL_INSTANCE_NUM" type="uint" variants="A8XX-"/> + <reg32 offset="0x9e02" name="PC_DRAWCALL_SIZE" type="uint" variants="A8XX-"/> + <!-- These match the contents of CP_SET_BIN_DATA (not written directly) --> <bitset name="a6xx_pc_vis_stream_cntl" inline="yes"> <bitfield name="UNK0" low="0" high="15"/> @@ -2430,20 +3401,30 @@ by a particular renderpass/blit. <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A6XX-A7XX"/> <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A6XX-A7XX"/> + <reg32 offset="0x9e0a" name="PC_AUTO_VERTEX_STRIDE"/> + <reg32 offset="0x9e0d" name="PC_VIS_STREAM_CNTL" type="a6xx_pc_vis_stream_cntl" variants="A8XX-"/> + <reg64 offset="0x9e0e" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A8XX-"/> + <reg64 offset="0x9e10" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A8XX-"/> + <bitset name="a6xx_pc_drawcall_cntl_override" inline="yes"> <doc>Written by CP_SET_VISIBILITY_OVERRIDE handler</doc> <bitfield name="OVERRIDE" pos="0" type="boolean"/> </bitset> <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE" type="a6xx_pc_drawcall_cntl_override" variants="A6XX-A7XX"/> + <reg32 offset="0x9e04" name="PC_DRAWCALL_CNTL_OVERRIDE" type="a6xx_pc_drawcall_cntl_override" variants="A8XX-"/> - <reg32 offset="0x9e24" name="PC_UNKNOWN_9E24" variants="A7XX-" usage="cmd"/> + <reg32 offset="0x9e24" name="PC_UNKNOWN_9E24" variants="A7XX-" usage="init"/> <array offset="0x9e34" name="PC_PERFCTR_PC_SEL" stride="1" length="8" variants="A6XX"/> - <array offset="0x9e42" name="PC_PERFCTR_PC_SEL" stride="1" length="16" variants="A7XX-"/> + <array offset="0x9e42" name="PC_PERFCTR_PC_SEL" stride="1" length="16" variants="A7XX"/> + <array offset="0x9e30" name="PC_PERFCTR_PC_SEL" stride="1" length="16" variants="A8XX-"/> + <array offset="0x9f00" name="PC_SLICE_PERFCTR_PC_SEL" stride="1" length="16" variants="A8XX-"/> <!-- always 0x0 --> - <reg32 offset="0x9e72" name="PC_UNKNOWN_9E72" usage="cmd"/> + <reg32 offset="0x9e72" name="PC_CONTEXT_SWITCH_GFX_PREEMPTION_MODE" variants="A6XX-A7XX" usage="init"/> + <reg32 offset="0x9e63" name="PC_CONTEXT_SWITCH_GFX_PREEMPTION_MODE" variants="A8XX-"/> + <reg32 offset="0x9e64" name="PC_CONTEXT_SWITCH_STABILIZE_CNTL_1" variants="A8XX-"/> <reg32 offset="0xa000" name="VFD_CNTL_0" usage="rp_blit"> <bitfield name="FETCH_CNT" low="0" high="5" type="uint"/> @@ -2530,11 +3511,15 @@ by a particular renderpass/blit. 
<reg32 offset="0xa0f8" name="VFD_POWER_CNTL" low="0" high="2" usage="rp_blit"/> - <reg32 offset="0xa600" name="VFD_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/> + <reg32 offset="0xa600" name="VFD_DBG_ECO_CNTL" variants="A7XX-" usage="init"/> - <reg32 offset="0xa601" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode"/> + <reg32 offset="0xa601" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/> <array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="8" variants="A6XX"/> <array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="16" variants="A7XX-"/> + <reg32 offset="0xa639" name="VFD_CB_BV_THRESHOLD" variants="A8XX-"/> + <reg32 offset="0xa63a" name="VFD_CB_BR_THRESHOLD" variants="A8XX-"/> + <reg32 offset="0xa63b" name="VFD_CB_BUSY_REQ_CNT" variants="A8XX-"/> + <reg32 offset="0xa63c" name="VFD_CB_LP_REQ_CNT" variants="A8XX-"/> <!-- Note: this seems to always be paired with another bit in another @@ -2593,8 +3578,6 @@ by a particular renderpass/blit. <bitset name="a6xx_sp_xs_output_cntl" inline="yes"> <!-- # of VS outputs including pos/psize --> <bitfield name="OUT" low="0" high="5" type="uint"/> - <!-- FLAGS_REGID only for GS --> - <bitfield name="FLAGS_REGID" low="6" high="13" type="a3xx_regid"/> </bitset> <reg32 offset="0xa800" name="SP_VS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit"> @@ -2720,6 +3703,15 @@ by a particular renderpass/blit. <bitfield name="OFFSET" low="0" high="18" shr="11"/> </bitset> + <bitset name="a6xx_sp_xs_hysteresis" inline="yes"> + <doc>Same on a6xx/a7xx, UMD should not need to write this</doc> + </bitset> + + <bitset name="a8xx_sp_xs_hysteresis" inline="yes"> + <doc>UMD needs to write in some cases</doc> + <!-- seen 0x400, 0xc00, 0x1000, 0x1c00, 0x1000, 0x2000, 0x3000 --> + </bitset> + <reg32 offset="0xa81b" name="SP_VS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/> <reg64 offset="0xa81c" name="SP_VS_BASE" type="address" align="32" usage="rp_blit"/> <reg32 offset="0xa81e" name="SP_VS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/> @@ -2729,6 +3721,8 @@ by a particular renderpass/blit. <reg32 offset="0xa823" name="SP_VS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/> <reg32 offset="0xa824" name="SP_VS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/> <reg32 offset="0xa825" name="SP_VS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/> + <reg32 offset="0xa826" name="SP_VS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/> + <reg32 offset="0xa826" name="SP_VS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/> <reg32 offset="0xa82d" name="SP_VS_VGS_CNTL" variants="A7XX-" usage="cmd"/> <reg32 offset="0xa830" name="SP_HS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit"> @@ -2754,6 +3748,8 @@ by a particular renderpass/blit. <reg32 offset="0xa83b" name="SP_HS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/> <reg32 offset="0xa83c" name="SP_HS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/> <reg32 offset="0xa83d" name="SP_HS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/> + <reg32 offset="0xa83e" name="SP_HS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/> + <reg32 offset="0xa83e" name="SP_HS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/> <reg32 offset="0xa82f" name="SP_HS_VGS_CNTL" variants="A7XX-" usage="cmd"/> <reg32 offset="0xa840" name="SP_DS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit"> @@ -2791,6 +3787,8 @@ by a particular renderpass/blit. 
<reg32 offset="0xa863" name="SP_DS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/> <reg32 offset="0xa864" name="SP_DS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/> <reg32 offset="0xa865" name="SP_DS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/> + <reg32 offset="0xa866" name="SP_DS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/> + <reg32 offset="0xa866" name="SP_DS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/> <reg32 offset="0xa868" name="SP_DS_VGS_CNTL" variants="A7XX-" usage="cmd"/> <reg32 offset="0xa870" name="SP_GS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit"> @@ -2814,7 +3812,10 @@ by a particular renderpass/blit. <reg32 offset="0xa872" name="SP_GS_BOOLEAN_CF_MASK" type="hex" usage="rp_blit"/> <!-- TODO: exact same layout as 0xa802-0xa81a --> - <reg32 offset="0xa873" name="SP_GS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/> + <reg32 offset="0xa873" name="SP_GS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"> + <!-- FLAGS_REGID only for GS --> + <bitfield name="FLAGS_REGID" low="6" high="13" type="a3xx_regid"/> + </reg32> <array offset="0xa874" name="SP_GS_OUTPUT" stride="1" length="16" usage="rp_blit"> <reg32 offset="0x0" name="REG"> <bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/> @@ -2843,6 +3844,8 @@ by a particular renderpass/blit. <reg32 offset="0xa894" name="SP_GS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/> <reg32 offset="0xa895" name="SP_GS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/> <reg32 offset="0xa896" name="SP_GS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/> + <reg32 offset="0xa897" name="SP_GS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/> + <reg32 offset="0xa897" name="SP_GS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/> <reg32 offset="0xa899" name="SP_GS_VGS_CNTL" variants="A7XX-" usage="cmd"/> <reg64 offset="0xa8a0" name="SP_VS_SAMPLER_BASE" type="address" align="16" usage="cmd"/> @@ -2886,13 +3889,19 @@ by a particular renderpass/blit. <reg64 offset="0xa986" name="SP_PS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/> <reg32 offset="0xa988" name="SP_PS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/> - <reg32 offset="0xa989" name="SP_BLEND_CNTL" usage="rp_blit"> + <bitset name="a6xx_sp_blend_cntl" inline="yes"> <!-- per-mrt enable bit --> <bitfield name="ENABLE_BLEND" low="0" high="7"/> - <bitfield name="UNK8" pos="8" type="boolean"/> + <bitfield name="INDEPENDENT_BLEND_EN" pos="8" type="boolean"/> <bitfield name="DUAL_COLOR_IN_ENABLE" pos="9" type="boolean"/> <bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/> + </bitset> + + <reg32 offset="0xa989" name="SP_BLEND_CNTL" type="a6xx_sp_blend_cntl" variants="A6XX-A7XX" usage="rp_blit"/> + <reg32 offset="0xa989" name="SP_BLEND_CNTL" type="a6xx_sp_blend_cntl" variants="A8XX-" usage="rp_blit"> + <bitfield name="ALPHA_TO_ONE" pos="11" type="boolean" variants="A8XX-"/> </reg32> + <reg32 offset="0xa98a" name="SP_SRGB_CNTL" usage="rp_blit"> <!-- Same as RB_SRGB_CNTL --> <bitfield name="SRGB_MRT0" pos="0" type="boolean"/> @@ -2993,13 +4002,11 @@ by a particular renderpass/blit. <reg32 offset="0xa9a7" name="SP_PS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/> <reg32 offset="0xa9a8" name="SP_UNKNOWN_A9A8" low="0" high="16" usage="cmd"/> <!-- always 0x0 ? 
--> <reg32 offset="0xa9a9" name="SP_PS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/> - <reg32 offset="0xa9ab" name="SP_PS_UNKNOWN_A9AB" variants="A7XX-" usage="cmd"/> + <reg32 offset="0xa9ab" name="SP_PS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/> + <reg32 offset="0xa9ab" name="SP_PS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/> <!-- TODO: unknown bool register at 0xa9aa, likely same as 0xa8c0-0xa8c3 but for FS --> - - - <reg32 offset="0xa9b0" name="SP_CS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="cmd"> <bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/> <!-- seems to make SP use less concurrent threads when possible? --> @@ -3036,6 +4043,7 @@ by a particular renderpass/blit. must be at least the actual CONSTLEN. </doc> </bitfield> + <bitfield name="ALT_LM_ENCODE" pos="26" type="boolean"/> </reg32> <reg32 offset="0xa9b2" name="SP_CS_BOOLEAN_CF_MASK" type="hex" usage="cmd"/> <reg32 offset="0xa9b3" name="SP_CS_PROGRAM_COUNTER_OFFSET" type="uint" usage="cmd"/> @@ -3047,7 +4055,8 @@ by a particular renderpass/blit. <reg32 offset="0xa9bb" name="SP_CS_CONFIG" type="a6xx_sp_xs_config" usage="cmd"/> <reg32 offset="0xa9bc" name="SP_CS_INSTR_SIZE" low="0" high="27" type="uint" usage="cmd"/> <reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="cmd"/> - <reg32 offset="0xa9be" name="SP_CS_UNKNOWN_A9BE" variants="A7XX-" usage="cmd"/> + <reg32 offset="0xa9be" name="SP_CS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/> + <reg32 offset="0xa9be" name="SP_CS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/> <reg32 offset="0xa9c5" name="SP_CS_VGS_CNTL" variants="A7XX-" usage="cmd"/> <!-- new in a6xx gen4, matches SP_CS_CONST_CONFIG_0 --> @@ -3158,6 +4167,18 @@ by a particular renderpass/blit. <bitfield name="RT7" low="28" high="31"/> </reg32> + <array offset="0xaa04" name="SP_MRT_BLEND_CNTL" stride="1" length="8" variants="A8XX-"> + <reg32 offset="0" name="REG"> + <bitfield name="COLOR_BLEND_EN" pos="0" type="boolean"/> + <bitfield name="ALPHA_BLEND_EN" pos="1" type="boolean"/> + <bitfield name="COMPONENT_WRITE_MASK" low="7" high="10"/> + </reg32> + </array> + + <reg32 offset="0xaa0c" name="SP_ALPHA_TEST_CNTL" variants="A8XX-"> + <bitfield name="ALPHA_TEST" pos="8" type="boolean"/> + </reg32> + <reg32 offset="0xaaf2" name="SP_UNKNOWN_AAF2" type="uint" usage="cmd"/> <!-- @@ -3182,15 +4203,19 @@ by a particular renderpass/blit. 
--> <bitfield name="CONSTANT_DEMOTION_ENABLE" pos="0" type="boolean"/> <bitfield name="ISAMMODE" low="1" high="2" type="a6xx_isam_mode"/> - <bitfield name="SHARED_CONSTS_ENABLE" pos="3" type="boolean"/> <!-- see HLSQ_SHARED_CONSTS --> + <bitfield name="SHARED_CONSTS_ENABLE" pos="3" type="boolean"/> <!-- see SP_SHARED_CONSTANT --> </reg32> <reg32 offset="0xab01" name="SP_UNKNOWN_AB01" variants="A7XX-" usage="cmd"/> - <reg32 offset="0xab02" name="SP_UNKNOWN_AB02" variants="A7XX-" usage="cmd"/> + <reg32 offset="0xab02" name="SP_HLSQ_MODE_CNTL" variants="A7XX-" usage="cmd"> + <bitfield name="SHARED_CONSTS_ENABLE" pos="0" type="boolean"/> <!-- see SP_SHARED_CONSTANT --> + </reg32> <reg32 offset="0xab04" name="SP_PS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/> <reg32 offset="0xab05" name="SP_PS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/> + <reg32 offset="0xab06" name="SP_BIN_SIZE" type="a8xx_bin_size" variants="A8XX-" usage="rp_blit"/> + <array offset="0xab10" name="SP_GFX_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit"> <reg64 offset="0" name="DESCRIPTOR" variants="A6XX"> <bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/> @@ -3210,9 +4235,12 @@ by a particular renderpass/blit. --> <reg64 offset="0xab1a" name="SP_GFX_UAV_BASE" type="address" align="16" usage="cmd"/> <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" variants="A6XX-A7XX" usage="cmd"/> + <reg32 offset="0xab09" name="SP_GFX_USIZE" low="0" high="6" type="uint" variants="A8XX-" usage="cmd"/> <reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX" usage="cmd"/> + <reg32 offset="0xab23" name="SP_UNKNOWN_AB23" variants="A8XX-"/> + <enum name="a6xx_sp_a2d_output_ifmt_type"> <value name="OUTPUT_IFMT_2D_FLOAT" value="0"/> <value name="OUTPUT_IFMT_2D_SINT" value="1"/> @@ -3234,22 +4262,27 @@ by a particular renderpass/blit. 
<reg32 offset="0xacc0" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A6XX" usage="rp_blit"/> <reg32 offset="0xa9bf" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A7XX-" usage="rp_blit"/> - <reg32 offset="0xae00" name="SP_DBG_ECO_CNTL" usage="cmd"/> - <reg32 offset="0xae01" name="SP_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/> + <reg32 offset="0xae00" name="SP_DBG_ECO_CNTL" usage="init"/> + <reg32 offset="0xae01" name="SP_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" variants="A6XX"/> + <reg32 offset="0xae01" name="SP_SHADER_PROFILING" variants="A8XX-"/> <reg32 offset="0xae02" name="SP_NC_MODE_CNTL"> <!-- TODO: valid bits 0x3c3f, see kernel --> </reg32> - <reg32 offset="0xae03" name="SP_CHICKEN_BITS" usage="cmd"/> - <reg32 offset="0xae04" name="SP_NC_MODE_CNTL_2" usage="cmd"> + <reg32 offset="0xae03" name="SP_CHICKEN_BITS" usage="init"/> + <reg32 offset="0xae04" name="SP_NC_MODE_CNTL_2" usage="init"> <bitfield name="F16_NO_INF" pos="3" type="boolean"/> </reg32> - <reg32 offset="0xae06" name="SP_UNKNOWN_AE06" variants="A7XX-" usage="cmd"/> - <reg32 offset="0xae08" name="SP_CHICKEN_BITS_1" variants="A7XX-" usage="cmd"/> - <reg32 offset="0xae09" name="SP_CHICKEN_BITS_2" variants="A7XX-" usage="cmd"/> - <reg32 offset="0xae0a" name="SP_CHICKEN_BITS_3" variants="A7XX-" usage="cmd"/> + <reg32 offset="0xae05" name="SP_SS_CHICKEN_BITS_0" variants="A8XX-"/> + <reg32 offset="0xae06" name="SP_ISDB_CNTL" variants="A7XX-" usage="init"/> + <reg32 offset="0xae07" name="SP_PERFCTR_CNTL"/> + <reg32 offset="0xae08" name="SP_CHICKEN_BITS_1" variants="A7XX-" usage="init"/> + <reg32 offset="0xae09" name="SP_CHICKEN_BITS_2" variants="A7XX-" usage="init"/> + <reg32 offset="0xae0a" name="SP_CHICKEN_BITS_3" variants="A7XX-" usage="init"/> + <reg32 offset="0xae0b" name="SP_CHICKEN_BITS_4" variants="A8XX-"/> + <reg32 offset="0xae0c" name="SP_STATUS"/> - <reg32 offset="0xae0f" name="SP_PERFCTR_SHADER_MASK" usage="cmd"> + <reg32 offset="0xae0f" name="SP_PERFCTR_SHADER_MASK" usage="init"> <!-- some perfcntrs are affected by a per-stage enable bit (PERF_SP_ALU_WORKING_CYCLES for example) TODO: verify position of HS/DS/GS bits --> @@ -3260,24 +4293,47 @@ by a particular renderpass/blit. 
<bitfield name="FS" pos="4" type="boolean"/> <bitfield name="CS" pos="5" type="boolean"/> </reg32> - <array offset="0xae10" name="SP_PERFCTR_SP_SEL" stride="1" length="24"/> + <array offset="0xae10" name="SP_PERFCTR_SP_SEL" stride="1" length="24" variants="A6XX"/> <array offset="0xae60" name="SP_PERFCTR_HLSQ_SEL" stride="1" length="6" variants="A7XX-"/> - <reg32 offset="0xae6a" name="SP_UNKNOWN_AE6A" variants="A7XX-" usage="cmd"/> - <reg32 offset="0xae6b" name="SP_UNKNOWN_AE6B" variants="A7XX-" usage="cmd"/> - <reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/> + <reg32 offset="0xae6a" name="SP_UNKNOWN_AE6A" variants="A7XX-" usage="init"/> + <reg32 offset="0xae6b" name="SP_HLSQ_TIMEOUT_THRESHOLD_DP" variants="A7XX-" usage="init"/> + <reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="init"/> <reg32 offset="0xae6d" name="SP_READ_SEL" variants="A7XX-"> + <bitfield name="CONTEXT" low="26" high="30"/> + <bitfield name="SLICE" low="21" high="25"/> <bitfield name="LOCATION" low="18" high="20" type="a7xx_state_location"/> - <bitfield name="PIPE" low="16" high="17" type="a7xx_pipe"/> + <bitfield name="PIPE" low="16" high="17" type="adreno_pipe"/> <bitfield name="STATETYPE" low="8" high="15" type="a7xx_statetype_id"/> <bitfield name="USPTP" low="4" high="7"/> <bitfield name="SPTP" low="0" high="3"/> </reg32> <reg32 offset="0xae71" name="SP_DBG_CNTL" variants="A7XX-"/> - <reg32 offset="0xae73" name="SP_UNKNOWN_AE73" variants="A7XX-" usage="cmd"/> + <reg32 offset="0xae73" name="SP_HLSQ_DBG_ECO_CNTL_1" variants="A7XX-"/> + <reg32 offset="0xae74" name="SP_HLSQ_DBG_ECO_CNTL_2" variants="A7XX-"/> + <reg32 offset="0xae76" name="SP_HLSQ_DBG_ECO_CNTL_3" variants="A8XX-"/> <array offset="0xae80" name="SP_PERFCTR_SP_SEL" stride="1" length="36" variants="A7XX-"/> <!-- TODO: there are 4 more percntr select registers (0xae28-0xae2b) --> <!-- TODO: there are a few unknown registers in the 0xae30-0xae52 range --> - <reg32 offset="0xbe22" name="SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE"/> + <reg32 offset="0xae52" name="SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE"/> + + <reg64 offset="0xae10" name="SP_HLSQ_GC_GMEM_RANGE_MIN" variants="A8XX-"/> + <reg64 offset="0xae12" name="SP_HLSQ_LPAC_GMEM_RANGE_MIN" variants="A8XX-"/> + <reg32 offset="0xae15" name="SP_LPAC_CPI_STATUS" variants="A8XX-"/> + <reg32 offset="0xae16" name="SP_LPAC_DBG_STATUS" variants="A8XX-"/> + <reg32 offset="0xae17" name="SP_LPAC_ISDB_BATCH_COUNT" variants="A8XX-"/> + <reg32 offset="0xae18" name="SP_LPAC_ISDB_BATCH_COUNT_INCR_EN" variants="A8XX-"/> + <reg32 offset="0xae19" name="SP_LPAC_ISDB_BATCH_COUNT_SHADERS" variants="A8XX-"/> + <reg32 offset="0xae30" name="SP_ISDB_BATCH_COUNT" variants="A7XX-"/> + <reg32 offset="0xae31" name="SP_ISDB_BATCH_COUNT_INCR_EN" variants="A7XX-"/> + <reg32 offset="0xae32" name="SP_ISDB_BATCH_COUNT_SHADERS" variants="A7XX-"/> + <reg32 offset="0xae35" name="SP_ISDB_DEBUG_CONFIG" variants="A7XX-"/> + + <reg32 offset="0xae3a" name="SP_SELF_THROTTLE_CONTROL" variants="A7XX-"/> + <reg32 offset="0xae3b" name="SP_DISPATCH_CNTL" variants="A7XX-"/> + <reg64 offset="0xae3c" name="SP_SW_DEBUG_ADDR" variants="A7XX-"/> + <reg64 offset="0xae3e" name="SP_ISDB_DEBUG_ADDR" variants="A7XX-"/> + + <array offset="0xaec0" name="SP_PERFCTR_HLSQ_SEL_2_0" stride="1" length="6" variants="A7XX-"/> <!-- The downstream kernel calls the debug cluster of registers @@ -3285,12 +4341,15 @@ by a particular renderpass/blit. color base for compute shaders. 
--> <reg64 offset="0xb180" name="TPL1_CS_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/> - <reg32 offset="0xb182" name="SP_UNKNOWN_B182" low="0" high="2" usage="cmd"/> - <reg32 offset="0xb183" name="SP_UNKNOWN_B183" low="0" high="23" usage="cmd"/> + <reg32 offset="0xb182" name="TPL1_PS_ROTATION_CNTL" low="0" high="2" usage="cmd"/> + <reg32 offset="0xb183" name="TPL1_PS_SWIZZLE_CNTL" low="0" high="23" usage="cmd"/> <reg32 offset="0xb190" name="SP_UNKNOWN_B190"/> <reg32 offset="0xb191" name="SP_UNKNOWN_B191"/> + <reg32 offset="0xb2d6" name="TPL1_A2D_BIN_SIZE" type="a8xx_bin_size" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0xb2d7" name="TPL1_A2D_FILTER_CNTL" variants="A8XX-" usage="rp_blit"/> + <reg32 offset="0xb300" name="TPL1_RAS_MSAA_CNTL" usage="rp_blit"> <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/> <bitfield name="UNK2" low="2" high="3"/> @@ -3303,10 +4362,12 @@ by a particular renderpass/blit. <!-- looks to work in the same way as a5xx: --> <reg64 offset="0xb302" name="TPL1_GFX_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/> <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A6XX-A7XX" usage="rp_blit"/> - <reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/> - <reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/> + <reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit" variants="A6XX-A7XX" /> + <reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit" variants="A6XX-A7XX" /> <reg32 offset="0xb307" name="TPL1_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/> + <reg32 offset="0xb304" name="TPL1_BIN_SIZE" type="a8xx_bin_size" variants="A8XX-" usage="rp_blit"/> + <enum name="a6xx_coord_round"> <value value="0" name="COORD_TRUNCATE"/> <value value="1" name="COORD_ROUND_NEAREST_EVEN"/> @@ -3315,13 +4376,17 @@ by a particular renderpass/blit. <enum name="a6xx_nearest_mode"> <value value="0" name="ROUND_CLAMP_TRUNCATE"/> <value value="1" name="CLAMP_ROUND_TRUNCATE"/> + <value value="2" name="ROUND_FLOAT_TO_INT"/> <!-- only ARRAYCOORDROUNDMODE --> </enum> <reg32 offset="0xb309" name="TPL1_MODE_CNTL" usage="cmd"> <bitfield name="ISAMMODE" low="0" high="1" type="a6xx_isam_mode"/> <bitfield name="TEXCOORDROUNDMODE" pos="2" type="a6xx_coord_round"/> + <bitfield name="ARRAYCOORDROUNDMODE" low="3" high="4" type="a6xx_coord_round"/> <bitfield name="NEARESTMIPSNAP" pos="5" type="a6xx_nearest_mode"/> + <bitfield name="SAMPLEREPLICATE" pos="6" type="boolean"/> <bitfield name="DESTDATATYPEOVERRIDE" pos="7" type="boolean"/> + <bitfield name="PACK_SAMP_REDUCED_PRECISION" pos="8" type="boolean"/> </reg32> <reg32 offset="0xb310" name="SP_UNKNOWN_B310" variants="A7XX-" usage="cmd"/> @@ -3387,11 +4452,12 @@ by a particular renderpass/blit. <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/> </reg32> <reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0xab07" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/> <!-- always 0x100000 or 0x1000000? 
--> - <reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="cmd"/> - <reg32 offset="0xb601" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode"/> - <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd"> + <reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="init"/> + <reg32 offset="0xb601" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/> + <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="init"> <!-- Affects UBWC in some way, if BLIT_OP_SCALE is done with this bit set and if other blit is done without it - UBWC image may be copied incorrectly. --> @@ -3404,7 +4470,7 @@ by a particular renderpass/blit. <bitfield name="UPPER_BIT" pos="4" type="uint"/> <bitfield name="UNK6" low="6" high="7"/> </reg32> - <reg32 offset="0xb605" name="TPL1_UNKNOWN_B605" low="0" high="7" type="uint" variants="A6XX" usage="cmd"/> <!-- always 0x0 or 0x44 ? --> + <reg32 offset="0xb605" name="TPL1_UNKNOWN_B605" low="0" high="7" type="uint" variants="A6XX" usage="init"/> <!-- always 0x0 or 0x44 ? --> <array offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="5" variants="A6XX"> <reg32 offset="0" name="REG" low="0" high="29"/> @@ -3414,8 +4480,13 @@ by a particular renderpass/blit. <reg32 offset="0" name="REG" low="0" high="29" usage="cmd"/> </array> + <array offset="0xb606" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="25" variants="A8XX"> + <reg32 offset="0" name="REG" low="0" high="29"/> + </array> + <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12" variants="A6XX"/> <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="18" variants="A7XX"/> + <array offset="0xb620" name="TPL1_PERFCTR_TP_SEL" stride="1" length="20" variants="A8XX"/> <!-- TODO: 4 more perfcntr sel at 0xb620 ? --> @@ -3458,10 +4529,8 @@ by a particular renderpass/blit. <reg32 offset="0xa9ae" name="SP_PS_CNTL_1" variants="A7XX-" usage="rp_blit"> <bitfield name="SYSVAL_REGS_COUNT" low="0" high="7" type="uint"/> - <!-- UNK8 is set on a730/a740 --> - <bitfield name="UNK8" pos="8" type="boolean"/> - <!-- UNK9 is set on a750 --> - <bitfield name="UNK9" pos="9" type="boolean"/> + <bitfield name="DEFER_WAVE_ALLOC_DIS" pos="8" type="boolean"/> + <bitfield name="EVICT_BUF_MODE" low="9" high="10"/> </reg32> <reg32 offset="0xb820" name="HLSQ_LOAD_STATE_GEOM_CMD"/> @@ -3512,9 +4581,12 @@ by a particular renderpass/blit. 
<reg32 offset="0xb985" type="a6xx_sp_reg_prog_id_2" name="SP_REG_PROG_ID_2" variants="A6XX" usage="rp_blit"/> <reg32 offset="0xb986" type="a6xx_sp_reg_prog_id_3" name="SP_REG_PROG_ID_3" variants="A6XX" usage="rp_blit"/> <reg32 offset="0xb987" name="SP_CS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="cmd"/> - <reg32 offset="0xa9c6" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A7XX-" usage="rp_blit"/> + <reg32 offset="0xa9c6" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A7XX" usage="rp_blit"/> + <reg32 offset="0xa9c6" name="SP_PS_WAVE_CNTL" variants="A8XX-" usage="rp_blit"> + <bitfield name="VARYINGS" pos="1" type="boolean"/> + </reg32> <reg32 offset="0xa9c7" name="SP_LB_PARAM_LIMIT" low="0" high="2" variants="A7XX-" usage="rp_blit"> - <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/> + <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/> </reg32> <reg32 offset="0xa9c8" name="SP_REG_PROG_ID_0" variants="A7XX-" usage="rp_blit"> <bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/> @@ -3718,7 +4790,7 @@ by a particular renderpass/blit. <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/> </reg32> - <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A7XX-" usage="cmd"> + <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A7XX" usage="cmd"> <doc> This register clears pending loads queued up by CP_LOAD_STATE6. Each bit resets a particular kind(s) of @@ -3741,10 +4813,30 @@ by a particular renderpass/blit. <bitfield name="GFX_BINDLESS" low="17" high="24" type="hex"/> </reg32> + <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A8XX" usage="cmd"> + <doc> + This register clears pending loads queued up by + CP_LOAD_STATE6. Each bit resets a particular kind(s) of + CP_LOAD_STATE6. + </doc> + + <!-- per-stage state: shader, non-bindless UBO, textures, and samplers --> + <bitfield name="VS_STATE" pos="0" type="boolean"/> + <bitfield name="HS_STATE" pos="1" type="boolean"/> + <bitfield name="DS_STATE" pos="2" type="boolean"/> + <bitfield name="GS_STATE" pos="3" type="boolean"/> + <bitfield name="FS_STATE" pos="4" type="boolean"/> + <bitfield name="CS_STATE" pos="5" type="boolean"/> + </reg32> + + <reg32 offset="0xa9c0" name="SP_CS_BINDLESS_INVALIDATE"/> + <reg32 offset="0xab08" name="SP_GFX_BINDLESS_INVALIDATE"/> + <reg32 offset="0xbb10" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/> <reg32 offset="0xab03" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/> <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX" stride="1" length="64" variants="A7XX"/> + <array offset="0xab30" name="SP_SHARED_CONSTANT_GFX" stride="1" length="128" variants="A8XX-"/> <reg32 offset="0xbb11" name="HLSQ_SHARED_CONSTS" variants="A6XX" usage="cmd"> <doc> @@ -3781,15 +4873,15 @@ by a particular renderpass/blit. 
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/> </reg32> - <reg32 offset="0xbe00" name="HLSQ_UNKNOWN_BE00" variants="A6XX" usage="cmd"/> <!-- all bits valid except bit 29 --> - <reg32 offset="0xbe01" name="HLSQ_UNKNOWN_BE01" low="4" high="6" variants="A6XX" usage="cmd"/> - <reg32 offset="0xbe04" name="HLSQ_DBG_ECO_CNTL" variants="A6XX" usage="cmd"/> - <reg32 offset="0xbe05" name="HLSQ_ADDR_MODE_CNTL" type="a5xx_address_mode"/> + <reg32 offset="0xbe00" name="HLSQ_UNKNOWN_BE00" variants="A6XX" usage="init"/> <!-- all bits valid except bit 29 --> + <reg32 offset="0xbe01" name="HLSQ_UNKNOWN_BE01" low="4" high="6" variants="A6XX" usage="init"/> + <reg32 offset="0xbe04" name="HLSQ_DBG_ECO_CNTL" variants="A6XX" usage="init"/> + <reg32 offset="0xbe05" name="HLSQ_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/> <reg32 offset="0xbe08" name="HLSQ_UNKNOWN_BE08" low="0" high="15"/> <array offset="0xbe10" name="HLSQ_PERFCTR_HLSQ_SEL" stride="1" length="6"/> <!-- TODO: some valid registers between 0xbe20 and 0xbe33 --> - <reg32 offset="0xbe22" name="HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE"/> + <reg32 offset="0xbe22" name="HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE" variants="A6XX"/> <reg32 offset="0xc000" name="SP_AHB_READ_APERTURE" variants="A7XX-"/> @@ -3918,6 +5010,7 @@ by a particular renderpass/blit. <reg32 offset="0x0001" name="SYSTEM_CACHE_CNTL_0"/> <reg32 offset="0x0002" name="SYSTEM_CACHE_CNTL_1"/> <reg32 offset="0x0039" name="CX_MISC_TCM_RET_CNTL" variants="A7XX-"/> + <reg32 offset="0x0087" name="CX_MISC_SLICE_ENABLE_FINAL" variants="A8XX"/> <reg32 offset="0x0400" name="CX_MISC_SW_FUSE_VALUE" variants="A7XX-"> <bitfield pos="0" name="FASTBLEND" type="boolean"/> <bitfield pos="1" name="LPAC" type="boolean"/> diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml index 4e42f055b85f..81538831dc19 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml @@ -303,7 +303,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> </enum> <!-- -Used in a6xx_a2d_bit_cntl.. the value mostly seems to correlate to the +Used in a6xx_a2d_blt_cntl.. the value mostly seems to correlate to the component type/size, so I think it relates to internal format used for blending? The one exception is that 16b unorm and 32b float use the same value... 
maybe 16b unorm is uncommon enough that it was just easier diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml index b15a242d974d..c4e00b1263cd 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml @@ -40,56 +40,62 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <bitfield name="IRQ_MASK_BIT" pos="0" /> </bitset> - <reg32 offset="0x80" name="GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL"/> - <reg32 offset="0x81" name="GMU_GX_SPTPRAC_POWER_CONTROL"/> - <reg32 offset="0xc00" name="GMU_CM3_ITCM_START"/> - <reg32 offset="0x1c00" name="GMU_CM3_DTCM_START"/> - <reg32 offset="0x23f0" name="GMU_NMI_CONTROL_STATUS"/> - <reg32 offset="0x23f8" name="GMU_BOOT_SLUMBER_OPTION"/> - <reg32 offset="0x23f9" name="GMU_GX_VOTE_IDX"/> - <reg32 offset="0x23fa" name="GMU_MX_VOTE_IDX"/> - <reg32 offset="0x23fc" name="GMU_DCVS_ACK_OPTION"/> - <reg32 offset="0x23fd" name="GMU_DCVS_PERF_SETTING"/> - <reg32 offset="0x23fe" name="GMU_DCVS_BW_SETTING"/> - <reg32 offset="0x23ff" name="GMU_DCVS_RETURN"/> - <reg32 offset="0x2bf8" name="GMU_CORE_FW_VERSION"> + <reg32 offset="0x1a880" name="GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL"/> + <reg32 offset="0x1a881" name="GMU_GX_SPTPRAC_POWER_CONTROL"/> + <reg32 offset="0x1b400" name="GMU_CM3_ITCM_START"/> + <reg32 offset="0x1c400" name="GMU_CM3_DTCM_START"/> + <reg32 offset="0x1cbf0" name="GMU_NMI_CONTROL_STATUS"/> + <reg32 offset="0x1cbf8" name="GMU_BOOT_SLUMBER_OPTION"/> + <reg32 offset="0x1cbf9" name="GMU_GX_VOTE_IDX"/> + <reg32 offset="0x1cbfa" name="GMU_MX_VOTE_IDX"/> + <reg32 offset="0x1cbfc" name="GMU_DCVS_ACK_OPTION"/> + <reg32 offset="0x1cbfd" name="GMU_DCVS_PERF_SETTING"/> + <reg32 offset="0x1cbfe" name="GMU_DCVS_BW_SETTING"/> + <reg32 offset="0x1cbff" name="GMU_DCVS_RETURN"/> + <reg32 offset="0x1d3f8" name="GMU_CORE_FW_VERSION"> <bitfield name="MAJOR" low="28" high="31"/> <bitfield name="MINOR" low="16" high="27"/> <bitfield name="STEP" low="0" high="15"/> </reg32> - <reg32 offset="0x4c00" name="GMU_ICACHE_CONFIG"/> - <reg32 offset="0x4c01" name="GMU_DCACHE_CONFIG"/> - <reg32 offset="0x4c0f" name="GMU_SYS_BUS_CONFIG"/> - <reg32 offset="0x5000" name="GMU_CM3_SYSRESET"/> - <reg32 offset="0x5001" name="GMU_CM3_BOOT_CONFIG"/> - <reg32 offset="0x501a" name="GMU_CM3_FW_BUSY"/> - <reg32 offset="0x501c" name="GMU_CM3_FW_INIT_RESULT"/> - <reg32 offset="0x502d" name="GMU_CM3_CFG"/> - <reg32 offset="0x5040" name="GMU_CX_GMU_POWER_COUNTER_ENABLE"/> - <reg32 offset="0x5041" name="GMU_CX_GMU_POWER_COUNTER_SELECT_0"/> - <reg32 offset="0x5042" name="GMU_CX_GMU_POWER_COUNTER_SELECT_1"/> - <reg32 offset="0x5044" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L"/> - <reg32 offset="0x5045" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H"/> - <reg32 offset="0x5046" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L"/> - <reg32 offset="0x5047" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H"/> - <reg32 offset="0x5048" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L"/> - <reg32 offset="0x5049" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H"/> - <reg32 offset="0x504a" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L"/> - <reg32 offset="0x504b" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H"/> - <reg32 offset="0x504c" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L"/> - <reg32 offset="0x504d" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H"/> - <reg32 offset="0x504e" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L"/> - <reg32 offset="0x504f" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H"/> - <reg32 offset="0x50c0" 
name="GMU_PWR_COL_INTER_FRAME_CTRL"> + <reg32 offset="0x1f400" name="GMU_ICACHE_CONFIG"/> + <reg32 offset="0x1f401" name="GMU_DCACHE_CONFIG"/> + <reg32 offset="0x1f40f" name="GMU_SYS_BUS_CONFIG"/> + <reg32 offset="0x1f50b" name="GMU_MRC_GBIF_QOS_CTRL"/> + <reg32 offset="0x1f800" name="GMU_CM3_SYSRESET"/> + <reg32 offset="0x1f801" name="GMU_CM3_BOOT_CONFIG"/> + <reg32 offset="0x1f81a" name="GMU_CM3_FW_BUSY"/> + <reg32 offset="0x1f81c" name="GMU_CM3_FW_INIT_RESULT"/> + <reg32 offset="0x1f82d" name="GMU_CM3_CFG"/> + <reg32 offset="0x1f840" name="GMU_CX_GMU_POWER_COUNTER_ENABLE"/> + <reg32 offset="0x1fc10" name="GMU_CX_GMU_POWER_COUNTER_ENABLE" variants="A8XX"/> + <reg32 offset="0x1f841" name="GMU_CX_GMU_POWER_COUNTER_SELECT_0"/> + <reg32 offset="0x1f842" name="GMU_CX_GMU_POWER_COUNTER_SELECT_1"/> + <reg32 offset="0x1fc40" name="GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_0" variants="A8XX-"/> + <reg32 offset="0x1fc41" name="GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_1" variants="A8XX-"/> + <reg32 offset="0x1f844" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L"/> + <reg32 offset="0x1fca0" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L" variants="A8XX-"/> + <reg32 offset="0x1f845" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H"/> + <reg32 offset="0x1fca1" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H" variants="A8XX-"/> + <reg32 offset="0x1f846" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L"/> + <reg32 offset="0x1f847" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H"/> + <reg32 offset="0x1f848" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L"/> + <reg32 offset="0x1f849" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H"/> + <reg32 offset="0x1f84a" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L"/> + <reg32 offset="0x1f84b" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H"/> + <reg32 offset="0x1f84c" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L"/> + <reg32 offset="0x1f84d" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H"/> + <reg32 offset="0x1f84e" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L"/> + <reg32 offset="0x1f84f" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H"/> + <reg32 offset="0x1f8c0" name="GMU_PWR_COL_INTER_FRAME_CTRL"> <bitfield name="IFPC_ENABLE" pos="0" type="boolean"/> <bitfield name="HM_POWER_COLLAPSE_ENABLE" pos="1" type="boolean"/> <bitfield name="SPTPRAC_POWER_CONTROL_ENABLE" pos="2" type="boolean"/> <bitfield name="NUM_PASS_SKIPS" low="10" high="13"/> <bitfield name="MIN_PASS_LENGTH" low="14" high="31"/> </reg32> - <reg32 offset="0x50c1" name="GMU_PWR_COL_INTER_FRAME_HYST"/> - <reg32 offset="0x50c2" name="GMU_PWR_COL_SPTPRAC_HYST"/> - <reg32 offset="0x50d0" name="GMU_SPTPRAC_PWR_CLK_STATUS"> + <reg32 offset="0x1f8c1" name="GMU_PWR_COL_INTER_FRAME_HYST"/> + <reg32 offset="0x1f8c2" name="GMU_PWR_COL_SPTPRAC_HYST"/> + <reg32 offset="0x1f8d0" name="GMU_SPTPRAC_PWR_CLK_STATUS" variants="A6XX"> <bitfield name="SPTPRAC_GDSC_POWERING_OFF" pos="0" type="boolean"/> <bitfield name="SPTPRAC_GDSC_POWERING_ON" pos="1" type="boolean"/> <bitfield name="SPTPRAC_GDSC_POWER_OFF" pos="2" type="boolean"/> @@ -99,15 +105,19 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <bitfield name="GX_HM_GDSC_POWER_OFF" pos="6" type="boolean"/> <bitfield name="GX_HM_CLK_OFF" pos="7" type="boolean"/> </reg32> - <reg32 offset="0x50d0" name="GMU_SPTPRAC_PWR_CLK_STATUS" variants="A7XX"> + <reg32 offset="0x1f8d0" name="GMU_SPTPRAC_PWR_CLK_STATUS" variants="A7XX"> <bitfield name="GX_HM_GDSC_POWER_OFF" pos="0" type="boolean"/> <bitfield name="GX_HM_CLK_OFF" pos="1" type="boolean"/> </reg32> - <reg32 offset="0x50e4" name="GMU_GPU_NAP_CTRL"> + <reg32 offset="0x1f7e8" 
name="GMU_PWR_CLK_STATUS" variants="A8XX-"> + <bitfield name="GX_HM_GDSC_POWER_OFF" pos="0" type="boolean"/> + <bitfield name="GX_HM_CLK_OFF" pos="1" type="boolean"/> + </reg32> + <reg32 offset="0x1f8e4" name="GMU_GPU_NAP_CTRL"> <bitfield name="HW_NAP_ENABLE" pos="0"/> <bitfield name="SID" low="4" high="8"/> </reg32> - <reg32 offset="0x50e8" name="GMU_RPMH_CTRL"> + <reg32 offset="0x1f8e8" name="GMU_RPMH_CTRL"> <bitfield name="RPMH_INTERFACE_ENABLE" pos="0" type="boolean"/> <bitfield name="LLC_VOTE_ENABLE" pos="4" type="boolean"/> <bitfield name="DDR_VOTE_ENABLE" pos="8" type="boolean"/> @@ -119,71 +129,84 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <bitfield name="CX_MIN_VOTE_ENABLE" pos="14" type="boolean"/> <bitfield name="GFX_MIN_VOTE_ENABLE" pos="15" type="boolean"/> </reg32> - <reg32 offset="0x50e9" name="GMU_RPMH_HYST_CTRL"/> - <reg32 offset="0x50ec" name="GPU_GMU_CX_GMU_RPMH_POWER_STATE"/> - <reg32 offset="0x50f0" name="GPU_GMU_CX_GMU_CX_FAL_INTF"/> - <reg32 offset="0x50f1" name="GPU_GMU_CX_GMU_CX_FALNEXT_INTF"/> - <reg32 offset="0x5100" name="GPU_GMU_CX_GMU_PWR_COL_CP_MSG"/> - <reg32 offset="0x5101" name="GPU_GMU_CX_GMU_PWR_COL_CP_RESP"/> - <reg32 offset="0x51f0" name="GMU_BOOT_KMD_LM_HANDSHAKE"/> - <reg32 offset="0x5157" name="GMU_LLM_GLM_SLEEP_CTRL"/> - <reg32 offset="0x5158" name="GMU_LLM_GLM_SLEEP_STATUS"/> - <reg32 offset="0x5088" name="GMU_ALWAYS_ON_COUNTER_L"/> - <reg32 offset="0x5089" name="GMU_ALWAYS_ON_COUNTER_H"/> - <reg32 offset="0x50c3" name="GMU_GMU_PWR_COL_KEEPALIVE"/> - <reg32 offset="0x50c4" name="GMU_PWR_COL_PREEMPT_KEEPALIVE"/> - <reg32 offset="0x5180" name="GMU_HFI_CTRL_STATUS"/> - <reg32 offset="0x5181" name="GMU_HFI_VERSION_INFO"/> - <reg32 offset="0x5182" name="GMU_HFI_SFR_ADDR"/> - <reg32 offset="0x5183" name="GMU_HFI_MMAP_ADDR"/> - <reg32 offset="0x5184" name="GMU_HFI_QTBL_INFO"/> - <reg32 offset="0x5185" name="GMU_HFI_QTBL_ADDR"/> - <reg32 offset="0x5186" name="GMU_HFI_CTRL_INIT"/> - <reg32 offset="0x5190" name="GMU_GMU2HOST_INTR_SET"/> - <reg32 offset="0x5191" name="GMU_GMU2HOST_INTR_CLR"/> - <reg32 offset="0x5192" name="GMU_GMU2HOST_INTR_INFO"> + <reg32 offset="0x1f8e9" name="GMU_RPMH_HYST_CTRL"/> + <reg32 offset="0x1f8ec" name="GPU_GMU_CX_GMU_RPMH_POWER_STATE" variants="A6XX"/> + <reg32 offset="0x1f7e9" name="GPU_GMU_CX_GMU_RPMH_POWER_STATE" variants="A8XX-"/> + <reg32 offset="0x1f8f0" name="GPU_GMU_CX_GMU_CX_FAL_INTF" variants="A6XX"/> + <reg32 offset="0x1f7ec" name="GPU_GMU_CX_GMU_CX_FAL_INTF" variants="A8XX-"/> + <reg32 offset="0x1f8f1" name="GPU_GMU_CX_GMU_CX_FALNEXT_INTF" variants="A6XX"/> + <reg32 offset="0x1f7ed" name="GPU_GMU_CX_GMU_CX_FALNEXT_INTF" variants="A8XX-"/> + <reg32 offset="0x1f900" name="GPU_GMU_CX_GMU_PWR_COL_CP_MSG"/> + <reg32 offset="0x1f901" name="GPU_GMU_CX_GMU_PWR_COL_CP_RESP"/> + <reg32 offset="0x1f9f0" name="GMU_BOOT_KMD_LM_HANDSHAKE"/> + <reg32 offset="0x1f957" name="GMU_LLM_GLM_SLEEP_CTRL"/> + <reg32 offset="0x1f958" name="GMU_LLM_GLM_SLEEP_STATUS"/> + <reg32 offset="0x1f888" name="GMU_ALWAYS_ON_COUNTER_L"/> + <reg32 offset="0x1f889" name="GMU_ALWAYS_ON_COUNTER_H"/> + <reg32 offset="0x1f8c3" name="GMU_GMU_PWR_COL_KEEPALIVE" variants="A6XX-A7XX"/> + <reg32 offset="0x1f7e4" name="GMU_GMU_PWR_COL_KEEPALIVE" variants="A8XX-"/> + <reg32 offset="0x1f8c4" name="GMU_PWR_COL_PREEMPT_KEEPALIVE" variants="A6XX-A7XX"/> + <reg32 offset="0x1f7e5" name="GMU_PWR_COL_PREEMPT_KEEPALIVE" variants="A8XX-"/> + <reg32 offset="0x1f980" name="GMU_HFI_CTRL_STATUS"/> + <reg32 offset="0x1f981" 
name="GMU_HFI_VERSION_INFO"/> + <reg32 offset="0x1f982" name="GMU_HFI_SFR_ADDR"/> + <reg32 offset="0x1f983" name="GMU_HFI_MMAP_ADDR"/> + <reg32 offset="0x1f984" name="GMU_HFI_QTBL_INFO"/> + <reg32 offset="0x1f985" name="GMU_HFI_QTBL_ADDR"/> + <reg32 offset="0x1f986" name="GMU_HFI_CTRL_INIT"/> + <reg32 offset="0x1f990" name="GMU_GMU2HOST_INTR_SET"/> + <reg32 offset="0x1f991" name="GMU_GMU2HOST_INTR_CLR"/> + <reg32 offset="0x1f992" name="GMU_GMU2HOST_INTR_INFO"> <bitfield name="MSGQ" pos="0" type="boolean"/> <bitfield name="CM3_FAULT" pos="23" type="boolean"/> </reg32> - <reg32 offset="0x5193" name="GMU_GMU2HOST_INTR_MASK"/> - <reg32 offset="0x5194" name="GMU_HOST2GMU_INTR_SET"/> - <reg32 offset="0x5195" name="GMU_HOST2GMU_INTR_CLR"/> - <reg32 offset="0x5196" name="GMU_HOST2GMU_INTR_RAW_INFO"/> - <reg32 offset="0x5197" name="GMU_HOST2GMU_INTR_EN_0"/> - <reg32 offset="0x5198" name="GMU_HOST2GMU_INTR_EN_1"/> - <reg32 offset="0x5199" name="GMU_HOST2GMU_INTR_EN_2"/> - <reg32 offset="0x519a" name="GMU_HOST2GMU_INTR_EN_3"/> - <reg32 offset="0x519b" name="GMU_HOST2GMU_INTR_INFO_0"/> - <reg32 offset="0x519c" name="GMU_HOST2GMU_INTR_INFO_1"/> - <reg32 offset="0x519d" name="GMU_HOST2GMU_INTR_INFO_2"/> - <reg32 offset="0x519e" name="GMU_HOST2GMU_INTR_INFO_3"/> - <reg32 offset="0x51c5" name="GMU_GENERAL_0"/> - <reg32 offset="0x51c6" name="GMU_GENERAL_1"/> - <reg32 offset="0x51cb" name="GMU_GENERAL_6"/> - <reg32 offset="0x51cc" name="GMU_GENERAL_7"/> - <reg32 offset="0x51cd" name="GMU_GENERAL_8" variants="A7XX"/> - <reg32 offset="0x51ce" name="GMU_GENERAL_9" variants="A7XX"/> - <reg32 offset="0x51cf" name="GMU_GENERAL_10" variants="A7XX"/> - <reg32 offset="0x515d" name="GMU_ISENSE_CTRL"/> - <reg32 offset="0x8920" name="GPU_CS_ENABLE_REG"/> - <reg32 offset="0x515d" name="GPU_GMU_CX_GMU_ISENSE_CTRL"/> - <reg32 offset="0x8578" name="GPU_CS_AMP_CALIBRATION_CONTROL3"/> - <reg32 offset="0x8558" name="GPU_CS_AMP_CALIBRATION_CONTROL2"/> - <reg32 offset="0x8580" name="GPU_CS_A_SENSOR_CTRL_0"/> - <reg32 offset="0x27ada" name="GPU_CS_A_SENSOR_CTRL_2"/> - <reg32 offset="0x881a" name="GPU_CS_SENSOR_GENERAL_STATUS"/> - <reg32 offset="0x8957" name="GPU_CS_AMP_CALIBRATION_CONTROL1"/> - <reg32 offset="0x881a" name="GPU_CS_SENSOR_GENERAL_STATUS"/> - <reg32 offset="0x881d" name="GPU_CS_AMP_CALIBRATION_STATUS1_0"/> - <reg32 offset="0x881f" name="GPU_CS_AMP_CALIBRATION_STATUS1_2"/> - <reg32 offset="0x8821" name="GPU_CS_AMP_CALIBRATION_STATUS1_4"/> - <reg32 offset="0x8965" name="GPU_CS_AMP_CALIBRATION_DONE"/> - <reg32 offset="0x896d" name="GPU_CS_AMP_PERIOD_CTRL"/> - <reg32 offset="0x8965" name="GPU_CS_AMP_CALIBRATION_DONE"/> - <reg32 offset="0x514d" name="GPU_GMU_CX_GMU_PWR_THRESHOLD"/> - <reg32 offset="0x9303" name="GMU_AO_INTERRUPT_EN"/> - <reg32 offset="0x9304" name="GMU_AO_HOST_INTERRUPT_CLR"/> - <reg32 offset="0x9305" name="GMU_AO_HOST_INTERRUPT_STATUS"> + <reg32 offset="0x1f993" name="GMU_GMU2HOST_INTR_MASK"/> + <reg32 offset="0x1f994" name="GMU_HOST2GMU_INTR_SET"/> + <reg32 offset="0x1f995" name="GMU_HOST2GMU_INTR_CLR"/> + <reg32 offset="0x1f996" name="GMU_HOST2GMU_INTR_RAW_INFO"/> + <reg32 offset="0x1f997" name="GMU_HOST2GMU_INTR_EN_0"/> + <reg32 offset="0x1f998" name="GMU_HOST2GMU_INTR_EN_1"/> + <reg32 offset="0x1f999" name="GMU_HOST2GMU_INTR_EN_2"/> + <reg32 offset="0x1f99a" name="GMU_HOST2GMU_INTR_EN_3"/> + <reg32 offset="0x1f99b" name="GMU_HOST2GMU_INTR_INFO_0"/> + <reg32 offset="0x1f99c" name="GMU_HOST2GMU_INTR_INFO_1"/> + <reg32 offset="0x1f99d" name="GMU_HOST2GMU_INTR_INFO_2"/> + <reg32 offset="0x1f99e" 
name="GMU_HOST2GMU_INTR_INFO_3"/> + <reg32 offset="0x1f9c5" name="GMU_GENERAL_0"/> + <reg32 offset="0x1f9c6" name="GMU_GENERAL_1"/> + <reg32 offset="0x1f9cb" name="GMU_GENERAL_6"/> + <reg32 offset="0x1f9cc" name="GMU_GENERAL_7"/> + <reg32 offset="0x1f9cd" name="GMU_GENERAL_8" variants="A7XX"/> + <reg32 offset="0x1f9ce" name="GMU_GENERAL_9" variants="A7XX"/> + <reg32 offset="0x1f9cf" name="GMU_GENERAL_10" variants="A7XX"/> + <reg32 offset="0x1f9c0" name="GMU_GENERAL_0" variants="A8XX"/> + <reg32 offset="0x1f9c1" name="GMU_GENERAL_1" variants="A8XX"/> + <reg32 offset="0x1f9c6" name="GMU_GENERAL_6" variants="A8XX"/> + <reg32 offset="0x1f9c7" name="GMU_GENERAL_7" variants="A8XX"/> + <reg32 offset="0x1f9c8" name="GMU_GENERAL_8" variants="A8XX"/> + <reg32 offset="0x1f9c9" name="GMU_GENERAL_9" variants="A8XX"/> + <reg32 offset="0x1f9ca" name="GMU_GENERAL_10" variants="A8XX"/> + <reg32 offset="0x1f9cb" name="GMU_GENERAL_11" variants="A8XX"/> + <reg32 offset="0x1f95d" name="GMU_ISENSE_CTRL"/> + <reg32 offset="0x23120" name="GPU_CS_ENABLE_REG"/> + <reg32 offset="0x1f95d" name="GPU_GMU_CX_GMU_ISENSE_CTRL"/> + <reg32 offset="0x22d78" name="GPU_CS_AMP_CALIBRATION_CONTROL3"/> + <reg32 offset="0x22d58" name="GPU_CS_AMP_CALIBRATION_CONTROL2"/> + <reg32 offset="0x22d80" name="GPU_CS_A_SENSOR_CTRL_0"/> + <reg32 offset="0x422da" name="GPU_CS_A_SENSOR_CTRL_2"/> + <reg32 offset="0x2301a" name="GPU_CS_SENSOR_GENERAL_STATUS"/> + <reg32 offset="0x23157" name="GPU_CS_AMP_CALIBRATION_CONTROL1"/> + <reg32 offset="0x2301a" name="GPU_CS_SENSOR_GENERAL_STATUS"/> + <reg32 offset="0x2301d" name="GPU_CS_AMP_CALIBRATION_STATUS1_0"/> + <reg32 offset="0x2301f" name="GPU_CS_AMP_CALIBRATION_STATUS1_2"/> + <reg32 offset="0x23021" name="GPU_CS_AMP_CALIBRATION_STATUS1_4"/> + <reg32 offset="0x23165" name="GPU_CS_AMP_CALIBRATION_DONE"/> + <reg32 offset="0x2316d" name="GPU_CS_AMP_PERIOD_CTRL"/> + <reg32 offset="0x23165" name="GPU_CS_AMP_CALIBRATION_DONE"/> + <reg32 offset="0x1f94d" name="GPU_GMU_CX_GMU_PWR_THRESHOLD"/> + <reg32 offset="0x23b03" name="GMU_AO_INTERRUPT_EN"/> + <reg32 offset="0x23b04" name="GMU_AO_HOST_INTERRUPT_CLR"/> + <reg32 offset="0x23b05" name="GMU_AO_HOST_INTERRUPT_STATUS"> <bitfield name="WDOG_BITE" pos="0" type="boolean"/> <bitfield name="RSCC_COMP" pos="1" type="boolean"/> <bitfield name="VDROOP" pos="2" type="boolean"/> @@ -191,27 +214,27 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <bitfield name="DBD_WAKEUP" pos="4" type="boolean"/> <bitfield name="HOST_AHB_BUS_ERROR" pos="5" type="boolean"/> </reg32> - <reg32 offset="0x9306" name="GMU_AO_HOST_INTERRUPT_MASK"/> - <reg32 offset="0x9309" name="GPU_GMU_AO_GMU_CGC_MODE_CNTL"/> - <reg32 offset="0x930a" name="GPU_GMU_AO_GMU_CGC_DELAY_CNTL"/> - <reg32 offset="0x930b" name="GPU_GMU_AO_GMU_CGC_HYST_CNTL"/> - <reg32 offset="0x930c" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS"> + <reg32 offset="0x23b06" name="GMU_AO_HOST_INTERRUPT_MASK"/> + <reg32 offset="0x23b09" name="GPU_GMU_AO_GMU_CGC_MODE_CNTL"/> + <reg32 offset="0x23b0a" name="GPU_GMU_AO_GMU_CGC_DELAY_CNTL"/> + <reg32 offset="0x23b0b" name="GPU_GMU_AO_GMU_CGC_HYST_CNTL"/> + <reg32 offset="0x23b0c" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS"> <bitfield name = "GPUBUSYIGNAHB" pos="23" type="boolean"/> </reg32> - <reg32 offset="0x930d" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS2"/> - <reg32 offset="0x930e" name="GPU_GMU_AO_GPU_CX_BUSY_MASK"/> - <reg32 offset="0x9310" name="GMU_AO_AHB_FENCE_CTRL"/> - <reg32 offset="0x9313" name="GMU_AHB_FENCE_STATUS"/> - <reg32 offset="0x9314" 
name="GMU_AHB_FENCE_STATUS_CLR"/> - <reg32 offset="0x9315" name="GMU_RBBM_INT_UNMASKED_STATUS"/> - <reg32 offset="0x9316" name="GMU_AO_SPARE_CNTL"/> - <reg32 offset="0x9307" name="GMU_RSCC_CONTROL_REQ"/> - <reg32 offset="0x9308" name="GMU_RSCC_CONTROL_ACK"/> - <reg32 offset="0x9311" name="GMU_AHB_FENCE_RANGE_0"/> - <reg32 offset="0x9312" name="GMU_AHB_FENCE_RANGE_1"/> - <reg32 offset="0x9c03" name="GPU_CC_GX_GDSCR"/> - <reg32 offset="0x9d42" name="GPU_CC_GX_DOMAIN_MISC"/> - <reg32 offset="0xc001" name="GPU_CPR_FSM_CTL"/> + <reg32 offset="0x23b0d" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS2"/> + <reg32 offset="0x23b0e" name="GPU_GMU_AO_GPU_CX_BUSY_MASK"/> + <reg32 offset="0x23b10" name="GMU_AO_AHB_FENCE_CTRL"/> + <reg32 offset="0x23b13" name="GMU_AHB_FENCE_STATUS"/> + <reg32 offset="0x23b14" name="GMU_AHB_FENCE_STATUS_CLR"/> + <reg32 offset="0x23b15" name="GMU_RBBM_INT_UNMASKED_STATUS"/> + <reg32 offset="0x23b16" name="GMU_AO_SPARE_CNTL"/> + <reg32 offset="0x23b07" name="GMU_RSCC_CONTROL_REQ"/> + <reg32 offset="0x23b08" name="GMU_RSCC_CONTROL_ACK"/> + <reg32 offset="0x23b11" name="GMU_AHB_FENCE_RANGE_0"/> + <reg32 offset="0x23b12" name="GMU_AHB_FENCE_RANGE_1"/> + <reg32 offset="0x24403" name="GPU_CC_GX_GDSCR"/> + <reg32 offset="0x24542" name="GPU_CC_GX_DOMAIN_MISC"/> + <reg32 offset="0x26801" name="GPU_CPR_FSM_CTL"/> <!-- starts at offset 0x8c00 on most gpus --> <reg32 offset="0x0004" name="GPU_RSCC_RSC_STATUS0_DRV0"/> @@ -233,12 +256,12 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <reg32 offset="0x03ee" name="RSCC_TCS1_DRV0_STATUS"/> <reg32 offset="0x0496" name="RSCC_TCS2_DRV0_STATUS"/> <reg32 offset="0x053e" name="RSCC_TCS3_DRV0_STATUS"/> - <reg32 offset="0x05e6" name="RSCC_TCS4_DRV0_STATUS" variants="A7XX"/> - <reg32 offset="0x068e" name="RSCC_TCS5_DRV0_STATUS" variants="A7XX"/> - <reg32 offset="0x0736" name="RSCC_TCS6_DRV0_STATUS" variants="A7XX"/> - <reg32 offset="0x07de" name="RSCC_TCS7_DRV0_STATUS" variants="A7XX"/> - <reg32 offset="0x0886" name="RSCC_TCS8_DRV0_STATUS" variants="A7XX"/> - <reg32 offset="0x092e" name="RSCC_TCS9_DRV0_STATUS" variants="A7XX"/> + <reg32 offset="0x05e6" name="RSCC_TCS4_DRV0_STATUS" variants="A7XX-"/> + <reg32 offset="0x068e" name="RSCC_TCS5_DRV0_STATUS" variants="A7XX-"/> + <reg32 offset="0x0736" name="RSCC_TCS6_DRV0_STATUS" variants="A7XX-"/> + <reg32 offset="0x07de" name="RSCC_TCS7_DRV0_STATUS" variants="A7XX-"/> + <reg32 offset="0x0886" name="RSCC_TCS8_DRV0_STATUS" variants="A7XX-"/> + <reg32 offset="0x092e" name="RSCC_TCS9_DRV0_STATUS" variants="A7XX-"/> </domain> </database> diff --git a/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml index 661b0dd0f675..8d195ee5d284 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml @@ -93,13 +93,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <value value="4" name="A7XX_HLSQ_DP_STR"/> </enum> -<enum name="a7xx_pipe"> - <value value="0" name="A7XX_PIPE_NONE"/> - <value value="1" name="A7XX_PIPE_BR"/> - <value value="2" name="A7XX_PIPE_BV"/> - <value value="3" name="A7XX_PIPE_LPAC"/> -</enum> - <enum name="a7xx_cluster"> <value value="0" name="A7XX_CLUSTER_NONE"/> <value value="1" name="A7XX_CLUSTER_FE"/> diff --git a/drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml b/drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml new file mode 100644 index 000000000000..edcbdb3b6921 --- /dev/null +++ 
b/drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml @@ -0,0 +1,121 @@ +<?xml version="1.0" encoding="UTF-8"?> +<database xmlns="http://nouveau.freedesktop.org/" +xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" +xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> +<import file="freedreno_copyright.xml"/> +<import file="adreno/adreno_common.xml"/> +<import file="adreno/adreno_pm4.xml"/> +<import file="adreno/a6xx_enums.xml"/> +<import file="adreno/a8xx_enums.xml"/> + +<domain name="A8XX_TEX_SAMP" width="32"> + <doc>Texture sampler dwords</doc> + <reg32 offset="0" name="0"> + <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/> + <bitfield name="MIPMAPING_DIS" pos="1" type="boolean"/> + <bitfield name="XY_MAG" low="2" high="3" type="a6xx_tex_filter"/> + <bitfield name="XY_MIN" low="4" high="5" type="a6xx_tex_filter"/> + <bitfield name="WRAP_S" low="6" high="8" type="a6xx_tex_clamp"/> + <bitfield name="WRAP_T" low="9" high="11" type="a6xx_tex_clamp"/> + <bitfield name="WRAP_R" low="12" high="14" type="a6xx_tex_clamp"/> + <bitfield name="MSAA_BOX_FILTERING" pos="15" type="boolean"/> + <bitfield name="LOD_BIAS" low="16" high="28" type="fixed" radix="8"/> + <bitfield name="ANISO" low="29" high="31" type="a6xx_tex_aniso"/> + </reg32> + <reg32 offset="1" name="1"> + <bitfield name="MAX_LOD" low="0" high="11" type="ufixed" radix="8"/> + <bitfield name="MIN_LOD" low="12" high="23" type="ufixed" radix="8"/> + <bitfield name="REDUCTION_MODE" low="24" high="25" type="a6xx_reduction_mode"/> + <bitfield name="COMPARE_FUNC" low="26" high="28" type="adreno_compare_func"/> + <bitfield name="CHROMA_LINEAR" pos="29" type="boolean"/> + <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="30" type="boolean"/> + <bitfield name="UNNORM_COORDS" pos="31" type="boolean"/> + </reg32> + <reg32 offset="2" name="2"> + <bitfield name="FASTBORDERCOLOREN" pos="0" type="boolean"/> + <bitfield name="FASTBORDERCOLOR" low="1" high="2" type="a6xx_fast_border_color"/> + <bitfield name="BCOLOR" low="7" high="31"/> + </reg32> + <reg32 offset="3" name="3"/> +</domain> + +<domain name="A8XX_TEX_MEMOBJ" width="32" varset="chip"> + <doc>Texture memobj dwords</doc> + <reg32 offset="0" name="0"> + <bitfield name="BASE_LO" low="6" high="31" shr="6"/> + </reg32> + <reg32 offset="1" name="1"> + <bitfield name="BASE_HI" low="0" high="16"/> + <bitfield name="TYPE" low="17" high="19" type="a6xx_tex_type"/> + <bitfield name="DEPTH" low="20" high="31" type="uint"/> + </reg32> + <reg32 offset="2" name="2"> + <bitfield name="WIDTH" low="0" high="14" type="uint"/> + <bitfield name="HEIGHT" low="15" high="29" type="uint"/> + <bitfield name="SAMPLES" low="30" high="31" type="a3xx_msaa_samples"/> + </reg32> + <reg32 offset="3" name="3"> + <bitfield name="FMT" low="0" high="7" type="a6xx_format"/> + <bitfield name="SWAP" low="8" high="9" type="a3xx_color_swap"/> + <bitfield name="SWIZ_X" low="10" high="12" type="a8xx_tex_swiz"/> + <bitfield name="SWIZ_Y" low="13" high="15" type="a8xx_tex_swiz"/> + <bitfield name="SWIZ_Z" low="16" high="18" type="a8xx_tex_swiz"/> + <bitfield name="SWIZ_W" low="19" high="21" type="a8xx_tex_swiz"/> + </reg32> + <reg32 offset="4" name="4"> + <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/> + <bitfield name="FLAG" pos="2" type="boolean"/> + <bitfield name="PRT_EN" pos="3" type="boolean"/> + <bitfield name="TILE_ALL" pos="4" type="boolean"/> + <bitfield name="SRGB" pos="5" type="boolean"/> + <bitfield name="FLAG_LO" low="6" high="31" shr="6"/> + <!-- For 
multiplanar: --> + <bitfield name="BASE_U_LO" low="6" high="31" shr="6"/> + </reg32> + <reg32 offset="5" name="5"> + <bitfield name="FLAG_HI" low="0" high="16"/> + <!-- For multiplanar: --> + <bitfield name="BASE_U_HI" low="0" high="16"/> + <bitfield name="FLAG_BUFFER_PITCH" low="17" high="24" shr="6" type="uint"/> + <bitfield name="ALL_SAMPLES_CENTER" pos="29" type="boolean"/> + <bitfield name="MUTABLEEN" pos="31" type="boolean"/> + </reg32> + <reg32 offset="6" name="6"> + <bitfield name="TEX_LINE_OFFSET" low="0" high="23" type="uint"/> <!-- PITCH --> + <bitfield name="MIN_LINE_OFFSET" low="24" high="27" type="uint"/> <!-- PITCHALIGN --> + <bitfield name="MIPLVLS" low="28" high="31" type="uint"/> + </reg32> + <reg32 offset="7" name="7"> + <bitfield name="ARRAY_SLICE_OFFSET" low="0" high="22" shr="12" type="uint"/> <!-- ARRAY_PITCH --> + <bitfield name="ASO_UNIT" pos="23"/> <!-- 4KB or 32B ? --> + <bitfield name="MIN_ARRAY_SLIZE_OFFSET" low="24" high="27" shr="12"/> <!-- MIN_LAYERSZ --> + <bitfield name="GMEM_TILING_FALLBACK_EN" pos="28" type="boolean"/> + <bitfield name="CORNER_BASED_EN" pos="30" type="boolean"/> + <bitfield name="GMEM_FULL_SURF" pos="31" type="boolean"/> + <!-- For multiplanar. This overlaps other single-planar fields: --> + <bitfield name="UV_OFFSET_H" low="24" high="25" type="ufixed" radix="2"/> <!-- CHROMA_MIDPOINT_X --> + <bitfield name="UV_OFFSET_V" low="26" high="27" type="ufixed" radix="2"/> <!-- CHROMA_MIDPOINT_Y --> + </reg32> + <reg32 offset="8" name="8"> + <bitfield name="FLAG_ARRAY_PITCH" low="0" high="14" shr="12" type="uint"/> <!-- FLAG_BUFFER_ARRAY_PITCH --> + <!-- log2 size of the first level, required for mipmapping --> + <bitfield name="FLAG_BUFFER_LOGW" low="24" high="27" type="uint"/> + <bitfield name="FLAG_BUFFER_LOGH" low="28" high="31" type="uint"/> + <!-- For multiplanar. 
This overlaps other single-planar fields: --> + <bitfield name="BASE_V_LO" low="6" high="31" shr="6"/> + </reg32> + <reg32 offset="9" name="9"> + <bitfield name="MIN_LOD_CLAMP" low="19" high="30" type="ufixed" radix="8"/> + <!-- For multiplanar, this overlaps other fields: --> + <bitfield name="BASE_V_HI" low="0" high="16"/> + <bitfield name="UV_PITCH" low="17" high="26"/> <!-- PLANE_PITCH --> + </reg32> + <reg32 offset="10" name="10"/> + <reg32 offset="11" name="11"/> + <reg32 offset="12" name="12"/> + <reg32 offset="13" name="13"/> + <reg32 offset="14" name="14"/> + <reg32 offset="15" name="15"/> +</domain> + +</database> diff --git a/drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml new file mode 100644 index 000000000000..c842db8c78d6 --- /dev/null +++ b/drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml @@ -0,0 +1,299 @@ +<?xml version="1.0" encoding="UTF-8"?> +<database xmlns="http://nouveau.freedesktop.org/" +xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" +xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> +<import file="freedreno_copyright.xml"/> +<import file="adreno/adreno_common.xml"/> +<import file="adreno/adreno_pm4.xml"/> + +<enum name="a8xx_statetype_id"> + <value value="0" name="A8XX_TP0_NCTX_REG"/> + <value value="1" name="A8XX_TP0_CTX0_3D_CVS_REG"/> + <value value="2" name="A8XX_TP0_CTX0_3D_CPS_REG"/> + <value value="3" name="A8XX_TP0_CTX1_3D_CVS_REG"/> + <value value="4" name="A8XX_TP0_CTX1_3D_CPS_REG"/> + <value value="5" name="A8XX_TP0_CTX2_3D_CPS_REG"/> + <value value="6" name="A8XX_TP0_CTX3_3D_CPS_REG"/> + <value value="9" name="A8XX_TP0_TMO_DATA"/> + <value value="10" name="A8XX_TP0_SMO_DATA"/> + <value value="11" name="A8XX_TP0_MIPMAP_BASE_DATA"/> + <value value="12" name="A8XX_TP_3D_CVS_REG"/> + <value value="13" name="A8XX_TP_3D_CPS_REG"/> + <value value="16" name="A8XX_SP_3D_CVS_REG"/> + <value value="17" name="A8XX_SP_3D_CPS_REG"/> + <value value="22" name="A8XX_SP_LB_DATA_RAM"/> + <value value="23" name="A8XX_SP_INST_DATA_RAM"/> + <value value="24" name="A8XX_SP_STH"/> + <value value="25" name="A8XX_SP_EVQ"/> + <value value="26" name="A8XX_SP_CONSMNG"/> + <value value="30" name="A8XX_HLSQ_INST_DATA_RAM"/> + <value value="31" name="A8XX_SP_INST_DATA_3"/> + <value value="32" name="A8XX_SP_NCTX_REG"/> + <value value="33" name="A8XX_SP_CTX0_3D_CVS_REG"/> + <value value="34" name="A8XX_SP_CTX0_3D_CPS_REG"/> + <value value="35" name="A8XX_SP_CTX1_3D_CVS_REG"/> + <value value="36" name="A8XX_SP_CTX1_3D_CPS_REG"/> + <value value="37" name="A8XX_SP_CTX2_3D_CPS_REG"/> + <value value="38" name="A8XX_SP_CTX3_3D_CPS_REG"/> + <value value="39" name="A8XX_SP_INST_DATA"/> + <value value="40" name="A8XX_SP_INST_DATA_1"/> + <value value="41" name="A8XX_SP_LB_0_DATA"/> + <value value="42" name="A8XX_SP_LB_1_DATA"/> + <value value="43" name="A8XX_SP_LB_2_DATA"/> + <value value="44" name="A8XX_SP_LB_3_DATA"/> + <value value="45" name="A8XX_SP_LB_4_DATA"/> + <value value="46" name="A8XX_SP_LB_5_DATA"/> + <value value="47" name="A8XX_SP_LB_6_DATA"/> + <value value="48" name="A8XX_SP_LB_7_DATA"/> + <value value="49" name="A8XX_SP_CB_RAM"/> + <value value="50" name="A8XX_SP_LB_13_DATA"/> + <value value="51" name="A8XX_SP_LB_14_DATA"/> + <value value="52" name="A8XX_SP_INST_TAG"/> + <value value="53" name="A8XX_SP_INST_DATA_2"/> + <value value="54" name="A8XX_SP_TMO_TAG"/> + <value value="55" name="A8XX_SP_SMO_TAG"/> + <value value="56" name="A8XX_SP_STATE_DATA"/> + <value value="57" 
name="A8XX_SP_HWAVE_RAM"/> + <value value="58" name="A8XX_SP_L0_INST_BUF"/> + <value value="59" name="A8XX_SP_LB_8_DATA"/> + <value value="60" name="A8XX_SP_LB_9_DATA"/> + <value value="61" name="A8XX_SP_LB_10_DATA"/> + <value value="62" name="A8XX_SP_LB_11_DATA"/> + <value value="63" name="A8XX_SP_LB_12_DATA"/> + <value value="64" name="A8XX_HLSQ_DATAPATH_DSTR_META"/> + <value value="65" name="A8XX_HLSQ_DESC_REMAP_META"/> + <value value="66" name="A8XX_HLSQ_SLICE_TOP_META"/> + <value value="67" name="A8XX_HLSQ_L2STC_TAG_RAM"/> + <value value="68" name="A8XX_HLSQ_L2STC_INFO_CMD"/> + <value value="69" name="A8XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/> + <value value="70" name="A8XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/> + <value value="71" name="A8XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/> + <value value="72" name="A8XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/> + <value value="73" name="A8XX_HLSQ_CHUNK_CVS_RAM"/> + <value value="74" name="A8XX_HLSQ_CHUNK_CPS_RAM"/> + <value value="75" name="A8XX_HLSQ_CHUNK_CVS_RAM_TAG"/> + <value value="76" name="A8XX_HLSQ_CHUNK_CPS_RAM_TAG"/> + <value value="77" name="A8XX_HLSQ_ICB_CVS_CB_BASE_TAG"/> + <value value="78" name="A8XX_HLSQ_ICB_CPS_CB_BASE_TAG"/> + <value value="79" name="A8XX_HLSQ_CVS_MISC_RAM"/> + <value value="80" name="A8XX_HLSQ_CPS_MISC_RAM"/> + <value value="81" name="A8XX_HLSQ_CPS_MISC_RAM_1"/> + <value value="82" name="A8XX_HLSQ_INST_RAM"/> + <value value="83" name="A8XX_HLSQ_GFX_CVS_CONST_RAM"/> + <value value="84" name="A8XX_HLSQ_GFX_CPS_CONST_RAM"/> + <value value="85" name="A8XX_HLSQ_CVS_MISC_RAM_TAG"/> + <value value="86" name="A8XX_HLSQ_CPS_MISC_RAM_TAG"/> + <value value="87" name="A8XX_HLSQ_INST_RAM_TAG"/> + <value value="88" name="A8XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/> + <value value="89" name="A8XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/> + <value value="90" name="A8XX_HLSQ_GFX_LOCAL_MISC_RAM"/> + <value value="91" name="A8XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/> + <value value="92" name="A8XX_HLSQ_INST_RAM_1"/> + <value value="93" name="A8XX_HLSQ_STPROC_META"/> + <value value="94" name="A8XX_HLSQ_SLICE_BACKEND_META"/> + <value value="95" name="A8XX_HLSQ_INST_RAM_2"/> + <value value="96" name="A8XX_HLSQ_DATAPATH_META"/> + <value value="97" name="A8XX_HLSQ_FRONTEND_META"/> + <value value="98" name="A8XX_HLSQ_INDIRECT_META"/> + <value value="99" name="A8XX_HLSQ_BACKEND_META"/> +</enum> + +<enum name="a8xx_state_location"> + <value value="0" name="A8XX_HLSQ_STATE"/> + <value value="1" name="A8XX_HLSQ_DP"/> + <value value="2" name="A8XX_SP_TOP"/> + <value value="3" name="A8XX_USPTP"/> + <value value="4" name="A8XX_HLSQ_DP_STR"/> +</enum> + +<enum name="a8xx_cluster"> + <value value="0" name="A8XX_CLUSTER_NONE"/> + <value value="1" name="A8XX_CLUSTER_FE_US"/> + <value value="2" name="A8XX_CLUSTER_FE_S"/> + <value value="3" name="A8XX_CLUSTER_SP_VS"/> + <value value="4" name="A8XX_CLUSTER_VPC_VS"/> + <value value="5" name="A8XX_CLUSTER_VPC_US"/> + <value value="6" name="A8XX_CLUSTER_GRAS"/> + <value value="7" name="A8XX_CLUSTER_SP_PS"/> + <value value="8" name="A8XX_CLUSTER_VPC_PS"/> + <value value="9" name="A8XX_CLUSTER_PS"/> +</enum> + +<enum name="a8xx_debugbus_id"> + <value value="1" name="A8XX_DEBUGBUS_GBIF_CX_GC_US_I_0"/> + <value value="2" name="A8XX_DEBUGBUS_GMU_CX_GC_US_I_0"/> + <value value="3" name="A8XX_DEBUGBUS_CX_GC_US_I_0"/> + <value value="8" name="A8XX_DEBUGBUS_GBIF_GX_GC_US_I_0"/> + <value value="9" name="A8XX_DEBUGBUS_GMU_GX_GC_US_I_0"/> + <value value="10" name="A8XX_DEBUGBUS_DBGC_GC_US_I_0"/> + <value value="11" name="A8XX_DEBUGBUS_RBBM_GC_US_I_0"/> + <value 
value="12" name="A8XX_DEBUGBUS_LARC_GC_US_I_0"/> + <value value="13" name="A8XX_DEBUGBUS_COM_GC_US_I_0"/> + <value value="14" name="A8XX_DEBUGBUS_HLSQ_GC_US_I_0"/> + <value value="15" name="A8XX_DEBUGBUS_CGC_GC_US_I_0"/> + <value value="20" name="A8XX_DEBUGBUS_VSC_GC_US_I_0_0"/> + <value value="21" name="A8XX_DEBUGBUS_VSC_GC_US_I_0_1"/> + <value value="24" name="A8XX_DEBUGBUS_UFC_GC_US_I_0"/> + <value value="25" name="A8XX_DEBUGBUS_UFC_GC_US_I_1"/> + <value value="40" name="A8XX_DEBUGBUS_CP_GC_US_I_0_0"/> + <value value="41" name="A8XX_DEBUGBUS_CP_GC_US_I_0_1"/> + <value value="42" name="A8XX_DEBUGBUS_CP_GC_US_I_0_2"/> + <value value="56" name="A8XX_DEBUGBUS_PC_BR_US_I_0"/> + <value value="57" name="A8XX_DEBUGBUS_PC_BV_US_I_0"/> + <value value="58" name="A8XX_DEBUGBUS_GPC_BR_US_I_0"/> + <value value="59" name="A8XX_DEBUGBUS_GPC_BV_US_I_0"/> + <value value="60" name="A8XX_DEBUGBUS_VPC_BR_US_I_0"/> + <value value="61" name="A8XX_DEBUGBUS_VPC_BV_US_I_0"/> + <value value="80" name="A8XX_DEBUGBUS_UCHE_WRAPPER_GC_US_I_0"/> + <value value="81" name="A8XX_DEBUGBUS_UCHE_GC_US_I_0"/> + <value value="82" name="A8XX_DEBUGBUS_UCHE_GC_US_I_1"/> + <value value="83" name="A8XX_DEBUGBUS_UCHE_GC_US_I_0_1"/> + <value value="84" name="A8XX_DEBUGBUS_UCHE_GC_US_I_1_1"/> + <value value="128" name="A8XX_DEBUGBUS_CP_GC_S_0_I_0"/> + <value value="129" name="A8XX_DEBUGBUS_PC_BR_S_0_I_0"/> + <value value="130" name="A8XX_DEBUGBUS_PC_BV_S_0_I_0"/> + <value value="131" name="A8XX_DEBUGBUS_TESS_GC_S_0_I_0"/> + <value value="132" name="A8XX_DEBUGBUS_TSEFE_GC_S_0_I_0"/> + <value value="133" name="A8XX_DEBUGBUS_TSEBE_GC_S_0_I_0"/> + <value value="134" name="A8XX_DEBUGBUS_RAS_GC_S_0_I_0"/> + <value value="135" name="A8XX_DEBUGBUS_LRZ_BR_S_0_I_0"/> + <value value="136" name="A8XX_DEBUGBUS_LRZ_BV_S_0_I_0"/> + <value value="137" name="A8XX_DEBUGBUS_VFDP_GC_S_0_I_0"/> + <value value="138" name="A8XX_DEBUGBUS_GPC_BR_S_0_I_0"/> + <value value="139" name="A8XX_DEBUGBUS_GPC_BV_S_0_I_0"/> + <value value="140" name="A8XX_DEBUGBUS_VPCFE_BR_S_0_I_0"/> + <value value="141" name="A8XX_DEBUGBUS_VPCFE_BV_S_0_I_0"/> + <value value="142" name="A8XX_DEBUGBUS_VPCBE_BR_S_0_I_0"/> + <value value="143" name="A8XX_DEBUGBUS_VPCBE_BV_S_0_I_0"/> + <value value="144" name="A8XX_DEBUGBUS_CCHE_GC_S_0_I_0"/> + <value value="145" name="A8XX_DEBUGBUS_DBGC_GC_S_0_I_0"/> + <value value="146" name="A8XX_DEBUGBUS_LARC_GC_S_0_I_0"/> + <value value="147" name="A8XX_DEBUGBUS_RBBM_GC_S_0_I_0"/> + <value value="148" name="A8XX_DEBUGBUS_CCRE_GC_S_0_I_0"/> + <value value="149" name="A8XX_DEBUGBUS_CGC_GC_S_0_I_0"/> + <value value="150" name="A8XX_DEBUGBUS_GMU_GC_S_0_I_0"/> + <value value="151" name="A8XX_DEBUGBUS_SLICE_GC_S_0_I_0"/> + <value value="152" name="A8XX_DEBUGBUS_HLSQ_SPTP_STAR_GC_S_0_I_0"/> + <value value="160" name="A8XX_DEBUGBUS_USP_GC_S_0_I_0"/> + <value value="161" name="A8XX_DEBUGBUS_USP_GC_S_0_I_1"/> + <value value="166" name="A8XX_DEBUGBUS_USPTP_GC_S_0_I_0"/> + <value value="167" name="A8XX_DEBUGBUS_USPTP_GC_S_0_I_1"/> + <value value="168" name="A8XX_DEBUGBUS_USPTP_GC_S_0_I_2"/> + <value value="169" name="A8XX_DEBUGBUS_USPTP_GC_S_0_I_3"/> + <value value="178" name="A8XX_DEBUGBUS_TP_GC_S_0_I_0"/> + <value value="179" name="A8XX_DEBUGBUS_TP_GC_S_0_I_1"/> + <value value="180" name="A8XX_DEBUGBUS_TP_GC_S_0_I_2"/> + <value value="181" name="A8XX_DEBUGBUS_TP_GC_S_0_I_3"/> + <value value="190" name="A8XX_DEBUGBUS_RB_GC_S_0_I_0"/> + <value value="191" name="A8XX_DEBUGBUS_RB_GC_S_0_I_1"/> + <value value="196" name="A8XX_DEBUGBUS_CCU_GC_S_0_I_0"/> + <value 
value="197" name="A8XX_DEBUGBUS_CCU_GC_S_0_I_1"/> + <value value="202" name="A8XX_DEBUGBUS_HLSQ_GC_S_0_I_0"/> + <value value="203" name="A8XX_DEBUGBUS_HLSQ_GC_S_0_I_1"/> + <value value="208" name="A8XX_DEBUGBUS_VFD_GC_S_0_I_0"/> + <value value="209" name="A8XX_DEBUGBUS_VFD_GC_S_0_I_1"/> + <value value="256" name="A8XX_DEBUGBUS_CP_GC_S_1_I_0"/> + <value value="257" name="A8XX_DEBUGBUS_PC_BR_S_1_I_0"/> + <value value="258" name="A8XX_DEBUGBUS_PC_BV_S_1_I_0"/> + <value value="259" name="A8XX_DEBUGBUS_TESS_GC_S_1_I_0"/> + <value value="260" name="A8XX_DEBUGBUS_TSEFE_GC_S_1_I_0"/> + <value value="261" name="A8XX_DEBUGBUS_TSEBE_GC_S_1_I_0"/> + <value value="262" name="A8XX_DEBUGBUS_RAS_GC_S_1_I_0"/> + <value value="263" name="A8XX_DEBUGBUS_LRZ_BR_S_1_I_0"/> + <value value="264" name="A8XX_DEBUGBUS_LRZ_BV_S_1_I_0"/> + <value value="265" name="A8XX_DEBUGBUS_VFDP_GC_S_1_I_0"/> + <value value="266" name="A8XX_DEBUGBUS_GPC_BR_S_1_I_0"/> + <value value="267" name="A8XX_DEBUGBUS_GPC_BV_S_1_I_0"/> + <value value="268" name="A8XX_DEBUGBUS_VPCFE_BR_S_1_I_0"/> + <value value="269" name="A8XX_DEBUGBUS_VPCFE_BV_S_1_I_0"/> + <value value="270" name="A8XX_DEBUGBUS_VPCBE_BR_S_1_I_0"/> + <value value="271" name="A8XX_DEBUGBUS_VPCBE_BV_S_1_I_0"/> + <value value="272" name="A8XX_DEBUGBUS_CCHE_GC_S_1_I_0"/> + <value value="273" name="A8XX_DEBUGBUS_DBGC_GC_S_1_I_0"/> + <value value="274" name="A8XX_DEBUGBUS_LARC_GC_S_1_I_0"/> + <value value="275" name="A8XX_DEBUGBUS_RBBM_GC_S_1_I_0"/> + <value value="276" name="A8XX_DEBUGBUS_CCRE_GC_S_1_I_0"/> + <value value="277" name="A8XX_DEBUGBUS_CGC_GC_S_1_I_0"/> + <value value="278" name="A8XX_DEBUGBUS_GMU_GC_S_1_I_0"/> + <value value="279" name="A8XX_DEBUGBUS_SLICE_GC_S_1_I_0"/> + <value value="280" name="A8XX_DEBUGBUS_HLSQ_SPTP_STAR_GC_S_1_I_0"/> + <value value="288" name="A8XX_DEBUGBUS_USP_GC_S_1_I_0"/> + <value value="289" name="A8XX_DEBUGBUS_USP_GC_S_1_I_1"/> + <value value="294" name="A8XX_DEBUGBUS_USPTP_GC_S_1_I_0"/> + <value value="295" name="A8XX_DEBUGBUS_USPTP_GC_S_1_I_1"/> + <value value="296" name="A8XX_DEBUGBUS_USPTP_GC_S_1_I_2"/> + <value value="297" name="A8XX_DEBUGBUS_USPTP_GC_S_1_I_3"/> + <value value="306" name="A8XX_DEBUGBUS_TP_GC_S_1_I_0"/> + <value value="307" name="A8XX_DEBUGBUS_TP_GC_S_1_I_1"/> + <value value="308" name="A8XX_DEBUGBUS_TP_GC_S_1_I_2"/> + <value value="309" name="A8XX_DEBUGBUS_TP_GC_S_1_I_3"/> + <value value="318" name="A8XX_DEBUGBUS_RB_GC_S_1_I_0"/> + <value value="319" name="A8XX_DEBUGBUS_RB_GC_S_1_I_1"/> + <value value="324" name="A8XX_DEBUGBUS_CCU_GC_S_1_I_0"/> + <value value="325" name="A8XX_DEBUGBUS_CCU_GC_S_1_I_1"/> + <value value="330" name="A8XX_DEBUGBUS_HLSQ_GC_S_1_I_0"/> + <value value="331" name="A8XX_DEBUGBUS_HLSQ_GC_S_1_I_1"/> + <value value="336" name="A8XX_DEBUGBUS_VFD_GC_S_1_I_0"/> + <value value="337" name="A8XX_DEBUGBUS_VFD_GC_S_1_I_1"/> + <value value="384" name="A8XX_DEBUGBUS_CP_GC_S_2_I_0"/> + <value value="385" name="A8XX_DEBUGBUS_PC_BR_S_2_I_0"/> + <value value="386" name="A8XX_DEBUGBUS_PC_BV_S_2_I_0"/> + <value value="387" name="A8XX_DEBUGBUS_TESS_GC_S_2_I_0"/> + <value value="388" name="A8XX_DEBUGBUS_TSEFE_GC_S_2_I_0"/> + <value value="389" name="A8XX_DEBUGBUS_TSEBE_GC_S_2_I_0"/> + <value value="390" name="A8XX_DEBUGBUS_RAS_GC_S_2_I_0"/> + <value value="391" name="A8XX_DEBUGBUS_LRZ_BR_S_2_I_0"/> + <value value="392" name="A8XX_DEBUGBUS_LRZ_BV_S_2_I_0"/> + <value value="393" name="A8XX_DEBUGBUS_VFDP_GC_S_2_I_0"/> + <value value="394" name="A8XX_DEBUGBUS_GPC_BR_S_2_I_0"/> + <value value="395" 
name="A8XX_DEBUGBUS_GPC_BV_S_2_I_0"/> + <value value="396" name="A8XX_DEBUGBUS_VPCFE_BR_S_2_I_0"/> + <value value="397" name="A8XX_DEBUGBUS_VPCFE_BV_S_2_I_0"/> + <value value="398" name="A8XX_DEBUGBUS_VPCBE_BR_S_2_I_0"/> + <value value="399" name="A8XX_DEBUGBUS_VPCBE_BV_S_2_I_0"/> + <value value="400" name="A8XX_DEBUGBUS_CCHE_GC_S_2_I_0"/> + <value value="401" name="A8XX_DEBUGBUS_DBGC_GC_S_2_I_0"/> + <value value="402" name="A8XX_DEBUGBUS_LARC_GC_S_2_I_0"/> + <value value="403" name="A8XX_DEBUGBUS_RBBM_GC_S_2_I_0"/> + <value value="404" name="A8XX_DEBUGBUS_CCRE_GC_S_2_I_0"/> + <value value="405" name="A8XX_DEBUGBUS_CGC_GC_S_2_I_0"/> + <value value="406" name="A8XX_DEBUGBUS_GMU_GC_S_2_I_0"/> + <value value="407" name="A8XX_DEBUGBUS_SLICE_GC_S_2_I_0"/> + <value value="408" name="A8XX_DEBUGBUS_HLSQ_SPTP_STAR_GC_S_2_I_0"/> + <value value="416" name="A8XX_DEBUGBUS_USP_GC_S_2_I_0"/> + <value value="417" name="A8XX_DEBUGBUS_USP_GC_S_2_I_1"/> + <value value="422" name="A8XX_DEBUGBUS_USPTP_GC_S_2_I_0"/> + <value value="423" name="A8XX_DEBUGBUS_USPTP_GC_S_2_I_1"/> + <value value="424" name="A8XX_DEBUGBUS_USPTP_GC_S_2_I_2"/> + <value value="425" name="A8XX_DEBUGBUS_USPTP_GC_S_2_I_3"/> + <value value="434" name="A8XX_DEBUGBUS_TP_GC_S_2_I_0"/> + <value value="435" name="A8XX_DEBUGBUS_TP_GC_S_2_I_1"/> + <value value="436" name="A8XX_DEBUGBUS_TP_GC_S_2_I_2"/> + <value value="437" name="A8XX_DEBUGBUS_TP_GC_S_2_I_3"/> + <value value="446" name="A8XX_DEBUGBUS_RB_GC_S_2_I_0"/> + <value value="447" name="A8XX_DEBUGBUS_RB_GC_S_2_I_1"/> + <value value="452" name="A8XX_DEBUGBUS_CCU_GC_S_2_I_0"/> + <value value="453" name="A8XX_DEBUGBUS_CCU_GC_S_2_I_1"/> + <value value="458" name="A8XX_DEBUGBUS_HLSQ_GC_S_2_I_0"/> + <value value="459" name="A8XX_DEBUGBUS_HLSQ_GC_S_2_I_1"/> + <value value="464" name="A8XX_DEBUGBUS_VFD_GC_S_2_I_0"/> + <value value="465" name="A8XX_DEBUGBUS_VFD_GC_S_2_I_1"/> +</enum> + +<enum name="a8xx_usptp_id"> + <value value="0" name="A8XX_uSPTP0"/> + <value value="1" name="A8XX_uSPTP1"/> + <value value="15" name="A8XX_SPTOP"/> +</enum> + +<enum name="a8xx_tex_swiz"> + <value name="A8XX_SWIZ_IDENTITY" value="0"/> + <value name="A8XX_SWIZ_ZERO" value="1"/> + <value name="A8XX_SWIZ_ONE" value="2"/> + <value name="A8XX_SWIZ_X" value="3"/> + <value name="A8XX_SWIZ_Y" value="4"/> + <value name="A8XX_SWIZ_Z" value="5"/> + <value name="A8XX_SWIZ_W" value="6"/> +</enum> + +</database> diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml index 218ec8bb966e..79d204f1e400 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml @@ -11,6 +11,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <value name="A5XX" value="5"/> <value name="A6XX" value="6"/> <value name="A7XX" value="7"/> + <value name="A8XX" value="8"/> </enum> <enum name="adreno_pa_su_sc_draw"> @@ -397,4 +398,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <value value="0x7" name="TEX_PREFETCH_UNK7"/> </enum> +<enum name="adreno_pipe"> + <value value="0" name="PIPE_NONE"/> + <value value="1" name="PIPE_BR"/> + <value value="2" name="PIPE_BV"/> + <value value="3" name="PIPE_LPAC"/> + <value value="4" name="PIPE_AQE0"/> + <value value="5" name="PIPE_AQE1"/> + <value value="6" name="PIPE_DDE_BR"/> + <value value="7" name="PIPE_DDE_BV"/> +</enum> + </database> diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml 
b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml index 0e10e1c6d263..51e9c94f5e37 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -6,103 +6,102 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <import file="adreno/adreno_common.xml"/> <enum name="vgt_event_type" varset="chip"> - <value name="VS_DEALLOC" value="0"/> - <value name="PS_DEALLOC" value="1" variants="A2XX-A6XX"/> - <value name="VS_DONE_TS" value="2"/> - <value name="PS_DONE_TS" value="3"/> + <value name="VS_DEALLOC" value="0x00" variants="A2XX-A5XX"/> + <value name="PS_DEALLOC" value="0x01" variants="A2XX-A5XX"/> + <value name="VS_DONE_TS" value="0x02" variants="A2XX-A5XX"/> + <value name="PS_DONE_TS" value="0x03" variants="A2XX-A5XX"/> <doc> Flushes dirty data from UCHE, and also writes a GPU timestamp to the address if one is provided. </doc> - <value name="CACHE_FLUSH_TS" value="4"/> - <value name="CONTEXT_DONE" value="5"/> - <value name="CACHE_FLUSH" value="6" variants="A2XX-A4XX"/> - <value name="VIZQUERY_START" value="7" variants="A2XX"/> - <value name="HLSQ_FLUSH" value="7" variants="A3XX-A4XX"/> - <value name="VIZQUERY_END" value="8" variants="A2XX"/> - <value name="SC_WAIT_WC" value="9" variants="A2XX"/> - <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX-"/> - <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX-"/> - <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX-"/> + <value name="CACHE_FLUSH_TS" value="0x04"/> + <value name="CONTEXT_DONE" value="0x05"/> + <value name="CACHE_FLUSH" value="0x06" variants="A2XX-A4XX"/> + <value name="VIZQUERY_START" value="0x07" variants="A2XX"/> + <value name="HLSQ_FLUSH" value="0x07" variants="A3XX-A4XX"/> + <value name="VIZQUERY_END" value="0x08" variants="A2XX"/> + <value name="SC_WAIT_WC" value="0x09" variants="A2XX"/> + <value name="WRITE_PRIMITIVE_COUNTS" value="0x09" variants="A6XX-"/> + <value name="START_PRIMITIVE_CTRS" value="0x0b" variants="A6XX-"/> + <value name="STOP_PRIMITIVE_CTRS" value="0x0c" variants="A6XX-"/> <!-- Not sure that these 4 events don't have the same meaning as on A5XX+ --> - <value name="RST_PIX_CNT" value="13" variants="A2XX-A4XX"/> - <value name="RST_VTX_CNT" value="14" variants="A2XX-A4XX"/> - <value name="TILE_FLUSH" value="15" variants="A2XX-A4XX"/> - <value name="STAT_EVENT" value="16" variants="A2XX-A4XX"/> - <value name="CACHE_FLUSH_AND_INV_TS_EVENT" value="20" variants="A2XX-A4XX"/> + <value name="RST_PIX_CNT" value="0x0d" variants="A2XX-A4XX"/> + <value name="RST_VTX_CNT" value="0x0e" variants="A2XX-A4XX"/> + <value name="TILE_FLUSH" value="0x0f" variants="A2XX-A4XX"/> + <value name="STAT_EVENT" value="0x10" variants="A2XX-A4XX"/> + <value name="CACHE_FLUSH_AND_INV_TS_EVENT" value="0x14" variants="A2XX-A4XX"/> <doc> If A6XX_RB_SAMPLE_COUNTER_CNTL.copy is true, writes OQ Z passed sample counts to RB_SAMPLE_COUNTER_BASE. This writes to main memory, skipping UCHE. </doc> - <value name="ZPASS_DONE" value="21"/> - <value name="CACHE_FLUSH_AND_INV_EVENT" value="22" variants="A2XX"/> + <value name="ZPASS_DONE" value="0x15"/> + <value name="CACHE_FLUSH_AND_INV_EVENT" value="0x16" variants="A2XX"/> <doc> Writes the GPU timestamp to the address that follows, once RB access and flushes are complete. 
</doc> - <value name="RB_DONE_TS" value="22" variants="A3XX-"/> + <value name="RB_DONE_TS" value="0x16" variants="A3XX-"/> - <value name="PERFCOUNTER_START" value="23" variants="A2XX-A4XX"/> - <value name="PERFCOUNTER_STOP" value="24" variants="A2XX-A4XX"/> - <value name="VS_FETCH_DONE" value="27"/> - <value name="FACENESS_FLUSH" value="28" variants="A2XX-A4XX"/> + <value name="PERFCOUNTER_START" value="0x17" variants="A2XX-A4XX"/> + <value name="PERFCOUNTER_STOP" value="0x18" variants="A2XX-A4XX"/> + <value name="VS_FETCH_DONE" value="0x1b" variants="A2XX-A5XX"/> + <value name="FACENESS_FLUSH" value="0x1c" variants="A2XX-A4XX"/> <!-- a5xx events --> - <value name="WT_DONE_TS" value="8" variants="A5XX-"/> - <value name="START_FRAGMENT_CTRS" value="13" variants="A5XX-"/> - <value name="STOP_FRAGMENT_CTRS" value="14" variants="A5XX-"/> - <value name="START_COMPUTE_CTRS" value="15" variants="A5XX-"/> - <value name="STOP_COMPUTE_CTRS" value="16" variants="A5XX-"/> - <value name="FLUSH_SO_0" value="17" variants="A5XX-"/> - <value name="FLUSH_SO_1" value="18" variants="A5XX-"/> - <value name="FLUSH_SO_2" value="19" variants="A5XX-"/> - <value name="FLUSH_SO_3" value="20" variants="A5XX-"/> + <value name="WT_DONE_TS" value="0x08" variants="A5XX-A6XX"/> + <value name="START_FRAGMENT_CTRS" value="0x0d" variants="A5XX-"/> + <value name="STOP_FRAGMENT_CTRS" value="0x0e" variants="A5XX-"/> + <value name="START_COMPUTE_CTRS" value="0x0f" variants="A5XX-"/> + <value name="STOP_COMPUTE_CTRS" value="0x10" variants="A5XX-"/> + <value name="FLUSH_SO_0" value="0x11" variants="A5XX-"/> + <value name="FLUSH_SO_1" value="0x12" variants="A5XX-"/> + <value name="FLUSH_SO_2" value="0x13" variants="A5XX-"/> + <value name="FLUSH_SO_3" value="0x14" variants="A5XX-"/> <doc> Invalidates depth attachment data from the CCU. We assume this happens in the last stage. </doc> - <value name="PC_CCU_INVALIDATE_DEPTH" value="24" variants="A5XX-"/> + <value name="PC_CCU_INVALIDATE_DEPTH" value="0x18" variants="A5XX-A6XX"/> <doc> Invalidates color attachment data from the CCU. We assume this happens in the last stage. </doc> - <value name="PC_CCU_INVALIDATE_COLOR" value="25" variants="A5XX-"/> + <value name="PC_CCU_INVALIDATE_COLOR" value="0x19" variants="A5XX-A6XX"/> <doc> Flushes the small cache used by CP_EVENT_WRITE::BLIT (which, along with its registers, would be better named RESOLVE). </doc> - <value name="PC_CCU_RESOLVE_TS" value="26" variants="A6XX"/> + <value name="PC_CCU_RESOLVE_TS" value="0x1a" variants="A6XX"/> <doc> Flushes depth attachment data from the CCU. We assume this happens in the last stage. </doc> - <value name="PC_CCU_FLUSH_DEPTH_TS" value="28" variants="A5XX-"/> + <value name="PC_CCU_FLUSH_DEPTH_TS" value="0x1c" variants="A5XX-A6XX"/> <doc> Flushes color attachment data from the CCU. We assume this happens in the last stage. </doc> - <value name="PC_CCU_FLUSH_COLOR_TS" value="29" variants="A5XX-"/> + <value name="PC_CCU_FLUSH_COLOR_TS" value="0x1d" variants="A5XX-A6XX"/> <doc> - 2D blit to resolve GMEM to system memory (skipping CCU) at the - end of a render pass. Compare to CP_BLIT's BLIT_OP_SCALE for - more general blitting. + Triggers a resolve (GMEM to sysmem) or unresolve (sysmem to + GMEM) or clear blit, depending on CCU programming. </doc> - <value name="BLIT" value="30" variants="A5XX-"/> + <value name="CCU_RESOLVE" value="0x1e" variants="A5XX-"/> <doc> Flip between the primary and secondary LRZ buffers. 
This is used for concurrent binning, so that BV can write to one buffer while BR reads from the other. </doc> - <value name="LRZ_FLIP_BUFFER" value="36" variants="A7XX"/> + <value name="LRZ_FLIP_BUFFER" value="0x24" variants="A7XX-"/> <doc> Clears based on GRAS_LRZ_CNTL configuration, could clear @@ -115,44 +114,46 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> CUR_DIR_UNSET = 0x3 Clear of direction means setting the direction to CUR_DIR_UNSET. </doc> - <value name="LRZ_CLEAR" value="37" variants="A5XX-"/> - - <value name="LRZ_FLUSH" value="38" variants="A5XX-"/> - <value name="BLIT_OP_FILL_2D" value="39" variants="A5XX-"/> - <value name="BLIT_OP_COPY_2D" value="40" variants="A5XX-A6XX"/> - <value name="LRZ_CACHE_INVALIDATE" value="40" variants="A7XX"/> - <value name="LRZ_Q_CACHE_INVALIDATE" value="41" variants="A7XX"/> - <value name="BLIT_OP_SCALE_2D" value="42" variants="A5XX-"/> - <value name="CONTEXT_DONE_2D" value="43" variants="A5XX-"/> - <value name="VSC_BINNING_START" value="44" variants="A5XX-"/> - <value name="VSC_BINNING_END" value="45" variants="A5XX-"/> + <value name="LRZ_CLEAR" value="0x25" variants="A5XX-"/> + + <value name="LRZ_FLUSH_INVALIDATE" value="0x26" variants="A5XX-A6XX"/> + <value name="LRZ_CACHE_FLUSH" value="0x26" variants="A7XX-"/> + <value name="BLIT_OP_FILL_2D" value="0x27" variants="A5XX-A6XX"/> + <value name="BLIT_OP_COPY_2D" value="0x28" variants="A5XX-A6XX"/> + <value name="LRZ_CACHE_INVALIDATE" value="0x28" variants="A7XX-"/> + <value name="LRZ_Q_CACHE_INVALIDATE" value="0x29" variants="A7XX-"/> + <value name="BLIT_OP_SCALE_2D" value="0x2a" variants="A5XX-"/> + <value name="CONTEXT_DONE_2D" value="0x2b" variants="A5XX-"/> + <value name="VSC_BINNING_START" value="0x2c" variants="A5XX-"/> + <value name="VSC_BINNING_END" value="0x2d" variants="A5XX-"/> <!-- a6xx events --> <doc> Invalidates UCHE. 
</doc> - <value name="CACHE_INVALIDATE" value="49" variants="A6XX"/> + <value name="CACHE_INVALIDATE" value="0x31" variants="A6XX"/> - <value name="LABEL" value="63" variants="A6XX-"/> + <value name="DEBUG_LABEL" value="0x3f" variants="A6XX-"/> <!-- note, some of these are the same as a6xx, just named differently --> <doc> Doesn't seem to do anything </doc> - <value name="DUMMY_EVENT" value="1" variants="A7XX"/> - <value name="CCU_INVALIDATE_DEPTH" value="24" variants="A7XX"/> - <value name="CCU_INVALIDATE_COLOR" value="25" variants="A7XX"/> - <value name="CCU_RESOLVE_CLEAN" value="26" variants="A7XX"/> - <value name="CCU_FLUSH_DEPTH" value="28" variants="A7XX"/> - <value name="CCU_FLUSH_COLOR" value="29" variants="A7XX"/> - <value name="CCU_RESOLVE" value="30" variants="A7XX"/> - <value name="CCU_END_RESOLVE_GROUP" value="31" variants="A7XX"/> - <value name="CCU_CLEAN_DEPTH" value="32" variants="A7XX"/> - <value name="CCU_CLEAN_COLOR" value="33" variants="A7XX"/> - <value name="CACHE_RESET" value="48" variants="A7XX"/> - <value name="CACHE_CLEAN" value="49" variants="A7XX"/> + <value name="DUMMY_EVENT" value="0x01" variants="A7XX-"/> + <value name="CCU_INVALIDATE_DEPTH" value="0x18" variants="A7XX-"/> + <value name="CCU_INVALIDATE_COLOR" value="0x19" variants="A7XX-"/> + <value name="CCU_RESOLVE_CLEAN" value="0x1a" variants="A7XX-"/> + <value name="CCU_FLUSH_DEPTH" value="0x1c" variants="A7XX-"/> + <value name="CCU_FLUSH_COLOR" value="0x1d" variants="A7XX-"/> + <value name="CCU_END_RESOLVE_GROUP" value="0x1f" variants="A7XX-"/> + <value name="CCU_CLEAN_DEPTH" value="0x20" variants="A7XX-"/> + <value name="CCU_CLEAN_COLOR" value="0x21" variants="A7XX-"/> + <value name="CACHE_RESET" value="0x30" variants="A7XX-"/> + <value name="CACHE_CLEAN" value="0x31" variants="A7XX-"/> <!-- TODO: deal with name conflicts with other gens --> - <value name="CACHE_FLUSH7" value="50" variants="A7XX"/> - <value name="CACHE_INVALIDATE7" value="51" variants="A7XX"/> + <value name="CACHE_FLUSH7" value="0x32" variants="A7XX-"/> + <value name="CACHE_INVALIDATE7" value="0x33" variants="A7XX-"/> + <value name="DEPTH_BUFFER_FLIP" value="0x3d" variants="A8XX-"/> + <value name="CCH_FAST_CLEAR_CLEAN" value="0x1b" variants="A8XX-"/> </enum> <enum name="pc_di_primtype"> @@ -310,11 +311,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <value name="CP_EVENT_WRITE" value="0x46" variants="A2XX-A6XX"/> <value name="CP_EVENT_WRITE7" value="0x46" variants="A7XX-"/> <doc>generate a VS|PS_done event</doc> - <value name="CP_EVENT_WRITE_SHD" value="0x58"/> + <value name="CP_EVENT_WRITE_SHD" value="0x58" variants="A2XX"/> <doc>generate a cache flush done event</doc> - <value name="CP_EVENT_WRITE_CFL" value="0x59"/> + <value name="CP_EVENT_WRITE_CFL" value="0x59" variants="A2XX"/> <doc>generate a z_pass done event</doc> - <value name="CP_EVENT_WRITE_ZPD" value="0x5b"/> + <value name="CP_EVENT_WRITE_ZPD" value="0x5b" variants="A2XX"/> <doc> not sure the real name, but this seems to be what is used for opencl, instead of CP_DRAW_INDX.. 
@@ -335,9 +336,9 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <doc>load constant into chip and to memory</doc> <value name="CP_SET_CONSTANT" value="0x2d" variants="A2XX"/> <doc>load sequencer instruction memory (pointer-based)</doc> - <value name="CP_IM_LOAD" value="0x27"/> + <value name="CP_IM_LOAD" value="0x27" variants="A2XX"/> <doc>load sequencer instruction memory (code embedded in packet)</doc> - <value name="CP_IM_LOAD_IMMEDIATE" value="0x2b"/> + <value name="CP_IM_LOAD_IMMEDIATE" value="0x2b" variants="A2XX"/> <doc>load constants from a location in memory</doc> <value name="CP_LOAD_CONSTANT_CONTEXT" value="0x2e" variants="A2XX"/> <doc>selective invalidation of state pointers</doc> @@ -662,6 +663,12 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <value name="CP_CCHE_INVALIDATE" value="0x3a" variants="A7XX-"/> <value name="CP_SCOPE_CNTL" value="0x6c" variants="A7XX-"/> + + <value name="CP_SKIP_IB_MODE" value="0x27" variants="A7XX-"/> + + <value name="CP_MEMORY_MAP_UPDATE" value="0x58" variants="A8XX-"/> + + <value name="CP_BARRIER" value="0x59" variants="A8XX-"/> </enum> @@ -1800,49 +1807,73 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) <value value="6" name="RM6_BIN_RESOLVE"/> <value value="7" name="RM6_BIN_RENDER_END"/> <value value="8" name="RM6_COMPUTE"/> - <value value="0xc" name="RM6_BLIT2DSCALE"/> <!-- no-op (at least on current sqe fw) --> + <value value="12" name="RM6_BLIT2DSCALE"/> <!-- no-op (at least on current sqe fw) --> <!-- These values come from a6xx_set_marker() in the downstream kernel, and they can only be set by the kernel --> - <value value="0xd" name="RM6_IB1LIST_START"/> - <value value="0xe" name="RM6_IB1LIST_END"/> + <value value="13" name="RM6_IB1LIST_START"/> + <value value="14" name="RM6_IB1LIST_END"/> + <value value="15" name="RM7_BIN_VISIBILITY_END"/> + + <!-- new in a8xx: --> + <value value="32" name="RM8_DEPTH_PASS_START"/> + <value value="33" name="RM8_DEPTH_PASS_END"/> + <value value="34" name="RM8_SET_RENDER_TARGET"/> + <value value="35" name="RM8_PGMEM_ON"/> + <value value="36" name="RM8_PGMEM_OFF"/> </enum> - <reg32 offset="0" name="0"> - <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) --> - <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/> - - <bitfield name="MODE" low="0" high="3" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/> - <!-- used by preemption to determine if GMEM needs to be saved or not --> - <bitfield name="USES_GMEM" pos="4" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/> - - <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/> - - <!-- - CP_SET_MARKER is used with these bits to create a - critical section around a workaround for ray tracing. - The workaround happens after BVH building, and appears - to invalidate the RTU's BVH node cache. It makes sure - that only one of BR/BV/LPAC is executing the - workaround at a time, and no draws using RT on BV/LPAC - are executing while the workaround is executed on BR (or - vice versa, that no draws on BV/BR using RT are executed - while the workaround executes on LPAC), by - hooking subsequent CP_EVENT_WRITE/CP_DRAW_*/CP_EXEC_CS. - The blob usage is: - - CP_SET_MARKER(RT_WA_START) - ... workaround here ... - CP_SET_MARKER(RT_WA_END) - ... - CP_SET_MARKER(SHADER_USES_RT) - CP_DRAW_INDX(...) or CP_EXEC_CS(...) 
- --> - <bitfield name="SHADER_USES_RT" pos="9" type="boolean" variants="A7XX-"/> - <bitfield name="RT_WA_START" pos="10" type="boolean" variants="A7XX-"/> - <bitfield name="RT_WA_END" pos="11" type="boolean" variants="A7XX-"/> - </reg32> + <stripe varset="chip" variants="A6XX-A7XX"> + <reg32 offset="0" name="0"> + <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) --> + <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/> + + + <bitfield name="MODE" low="0" high="3" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/> + <!-- used by preemption to determine if GMEM needs to be saved or not --> + <bitfield name="USES_GMEM" pos="4" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/> + + + <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/> + + + <!-- + CP_SET_MARKER is used with these bits to create a + critical section around a workaround for ray tracing. + The workaround happens after BVH building, and appears + to invalidate the RTU's BVH node cache. It makes sure + that only one of BR/BV/LPAC is executing the + workaround at a time, and no draws using RT on BV/LPAC + are executing while the workaround is executed on BR (or + vice versa, that no draws on BV/BR using RT are executed + while the workaround executes on LPAC), by + hooking subsequent CP_EVENT_WRITE/CP_DRAW_*/CP_EXEC_CS. + The blob usage is: + + + CP_SET_MARKER(RT_WA_START) + ... workaround here ... + CP_SET_MARKER(RT_WA_END) + ... + CP_SET_MARKER(SHADER_USES_RT) + CP_DRAW_INDX(...) or CP_EXEC_CS(...) + --> + <bitfield name="SHADER_USES_RT" pos="9" type="boolean" variants="A7XX-"/> + <bitfield name="RT_WA_START" pos="10" type="boolean" variants="A7XX-"/> + <bitfield name="RT_WA_END" pos="11" type="boolean" variants="A7XX-"/> + </reg32> + </stripe> + <stripe varset="chip" variants="A8XX-"> + <reg32 offset="0" name="0"> + <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) --> + <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/> + <bitfield name="MODE" low="0" high="6" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/> + <bitfield name="USES_GMEM" pos="7" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/> + <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/> + <!-- idk if the RT w/a fields apply to a8xx as well --> + </reg32> + </stripe> </domain> <domain name="CP_SET_PSEUDO_REG" width="32" varset="chip" prefix="chip" variants="A6XX-"> @@ -2066,6 +2097,14 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) payload *and* skipsaverestore is set. This is expected to restore static register values not saved when skipsaverestore is set. + + On BV, a skipsaverestore preemption is triggered + and this preamble type is executed whenever a + CP_THREAD_CONTROL that synchronizes threads + happens. This can be explicitly via + SYNC_THREADS, or implicitly when the value of + CONCURRENT_BIN_DISABLE changes from the previous + thread control. </doc> </value> <value name="POSTAMBLE_AMBLE_TYPE" value="2"> @@ -2308,5 +2347,99 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) </reg32> </domain> +<domain name="CP_RESOURCE_LIST" width="32"> + <doc> + A7xx introduces the "resource table" which is managed by + CP_RESOURCE_LIST. It is used to synchronize BR and BV access + to resources such as LRZ buffers. + + The resource table consists of resources that are in-use by BR. 
+ Each "resource" has a base address, which is + usually a pointer but is treated by the HW as an opaque handle, + a read/write bit, and a timestamp when it was last used. + Resources are removed from the table upon event completion when + a special CP_EVENT_WRITE::CLEAR_RENDER_RESOURCE bit is set, which + will remove all resources with a timestamp up to the current + timestamp. + + CP_RESOURCE_LIST first specifies a list of BV resources. For + each BV resource, the HW will check if there is a corresponding + BR resource in the table, and if at least one of the BV and BR + resources is marked WRITE then it will stall until the BR + resource is removed. + + It then specifies a list of BR resources. These will be added to + the resource table, unless there is an overflow in which case + the designated overflow register will have bit 0 set. Overflow + should cause the next binning pass to stall until BR is done, + effectively disabling concurrent binning. + + CP_RESOURCE_LIST must be executed by BV. BR resources are added + by BV and removed by BR. + + There is a separate table for "LRZ resources." These behave a + bit differently: specifying an LRZ resource via BV_RES_LRZ + stalls on any matching resource existing and then adds it to the + table, making it both a BV and BR resource in one. There is a + separate CLEAR_LRZ_RESOURCE bit for removing resources from the + LRZ table, and it only removes one resource given by a base + address passed to CP_EVENT_WRITE. Therefore timestamps are + unnecessary. + </doc> + <reg32 offset="0" name="BV_COUNT" type="uint"/> + <doc> + What follows is a list of CP_BV_RESOURCE and then CP_RESOURCE_LIST_BR. + </doc> +</domain> + +<domain name="CP_BV_RESOURCE" width="32"> + <doc> + BV resources don't go in the table. Instead CP waits until any + corresponding BR resources with the same base pointer are + finished before the packet completes. + </doc> + <enum name="cp_bv_resource_encoding"> + <value value="0" name="BV_RES_DIRECT"/> + <doc> + INDIRECT resources are encoded as a 32b offset + 3b + bindless base selector. The offset is added to the given + BINDLESS_BASE pseudoregister and then the 64b value + fetched there is used as the pointer. + </doc> + <value value="1" name="BV_RES_INDIRECT_READ"/> + <value value="2" name="BV_RES_LRZ"/> + <value value="3" name="BV_RES_INDIRECT_WRITE"/> + </enum> + <reg64 offset="0" name="0"> + <bitfield name="BASE_ADDR" low="1" high="61" shr="1" type="address"/> + <bitfield name="WRITE" pos="0" type="boolean"/> + <bitfield name="ENCODING" low="62" high="63" type="cp_bv_resource_encoding"/> + </reg64> +</domain> + +<domain name="CP_RESOURCE_LIST_BR" width="32"> + <reg32 offset="0" name="0"> + <bitfield name="BR_COUNT" low="0" high="23" type="uint"/> + <bitfield name="OVERFLOW_ONCHIP_ADDR" low="24" high="26"/> + <bitfield name="OVERFLOW" pos="31" type="boolean"/> + </reg32> + <doc> + What follows is a list of CP_BR_RESOURCE. 
+ </doc> +</domain> + +<domain name="CP_BR_RESOURCE" width="32"> + <enum name="cp_br_resource_encoding"> + <value value="0" name="BR_RES_DIRECT"/> + <value value="2" name="BR_RES_INDIRECT_READ"/> + <value value="3" name="BR_RES_INDIRECT_WRITE"/> <!-- set WRITE bit --> + </enum> + <reg64 offset="0" name="0"> + <bitfield name="BASE_ADDR" low="1" high="61" shr="1" type="address"/> + <bitfield name="WRITE" pos="0" type="boolean"/> + <bitfield name="ENCODING" low="62" high="63" type="cp_br_resource_encoding"/> + </reg64> +</domain> + </database> diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py index 1d603dadfabd..2acad951f1e2 100644 --- a/drivers/gpu/drm/msm/registers/gen_header.py +++ b/drivers/gpu/drm/msm/registers/gen_header.py @@ -189,12 +189,13 @@ class Bitset(object): print(" return (struct fd_reg_pair) {") print(" .reg = (uint32_t)%s," % reg.reg_offset()) print(" .value =") + cast = "(uint64_t)" if reg.bit_size == 64 else "" for f in self.fields: if f.type in [ "address", "waddress" ]: continue else: type, val = f.ctype("fields.%s" % field_name(reg, f)) - print(" (%-40s << %2d) |" % (val, f.low)) + print(" (%s%-40s << %2d) |" % (cast, val, f.low)) value_name = "dword" if reg.bit_size == 64: value_name = "qword" @@ -264,10 +265,11 @@ class Bitset(object): (prefix, prefix, prefix, skip)) - def dump(self, is_deprecated, prefix=None): + def dump(self, is_deprecated, prefix=None, reg=None): if prefix is None: prefix = self.name - if self.reg and self.reg.bit_size == 64: + reg64 = reg and self.reg and self.reg.bit_size == 64 + if reg64: print("static inline uint32_t %s_LO(uint32_t val)\n{" % prefix) print("\treturn val;\n}") print("static inline uint32_t %s_HI(uint32_t val)\n{" % prefix) @@ -283,14 +285,17 @@ class Bitset(object): elif f.type == "boolean" or (f.type is None and f.low == f.high): tab_to("#define %s" % name, "0x%08x" % (1 << f.low)) else: - tab_to("#define %s__MASK" % name, "0x%08x" % mask(f.low, f.high)) + typespec = "ull" if reg64 else "u" + tab_to("#define %s__MASK" % name, "0x%08x%s" % (mask(f.low, f.high), typespec)) tab_to("#define %s__SHIFT" % name, "%d" % f.low) type, val = f.ctype("val") + ret_type = "uint64_t" if reg64 else "uint32_t" + cast = "(uint64_t)" if reg64 else "" - print("static inline uint32_t %s(%s val)\n{" % (name, type)) + print("static inline %s %s(%s val)\n{" % (ret_type, name, type)) if f.shr > 0: print("\tassert(!(val & 0x%x));" % mask(0, f.shr - 1)) - print("\treturn ((%s) << %s__SHIFT) & %s__MASK;\n}" % (val, name, name)) + print("\treturn (%s(%s) << %s__SHIFT) & %s__MASK;\n}" % (cast, val, name, name)) print() class Array(object): @@ -437,7 +442,7 @@ class Reg(object): print("static inline%s uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (depcrstr, self.full_name, proto, offset, strides)) if self.bitset.inline: - self.bitset.dump(is_deprecated, self.full_name) + self.bitset.dump(is_deprecated, self.full_name, self) print("") def dump_pack_struct(self, is_deprecated): diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c index 1c3b33be6c40..72eb0de46b54 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_kms.c +++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c @@ -26,6 +26,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_plane.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "lcdif_drv.h" diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 7ed2516b6de0..8cac0a275b7d 100644 --- 
a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -26,6 +26,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_plane.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "mxsfb_drv.h" diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index c88776d1e784..3b5757aed9c8 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -28,6 +28,7 @@ config DRM_NOUVEAU select THERMAL if ACPI && X86 select ACPI_VIDEO if ACPI && X86 select SND_HDA_COMPONENT if SND_HDA_CORE + select PM_DEVFREQ if ARCH_TEGRA help Choose this option for open-source NVIDIA support. diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index e97e39abf3a2..12b1dba8e05d 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2867,7 +2867,9 @@ nv50_display_create(struct drm_device *dev) } /* Assign the correct format modifiers */ - if (disp->disp->object.oclass >= TU102_DISP) + if (disp->disp->object.oclass >= GB202_DISP) + nouveau_display(dev)->format_modifiers = wndwca7e_modifiers; + else if (disp->disp->object.oclass >= TU102_DISP) nouveau_display(dev)->format_modifiers = wndwc57e_modifiers; else if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h index 15f9242b72ac..5d998f0319dc 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -104,4 +104,5 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder); extern const u64 disp50xx_modifiers[]; extern const u64 disp90xx_modifiers[]; extern const u64 wndwc57e_modifiers[]; +extern const u64 wndwca7e_modifiers[]; #endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index e2c55f4b9c5a..ef9e410babbf 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -786,13 +786,14 @@ nv50_wndw_destroy(struct drm_plane *plane) } /* This function assumes the format has already been validated against the plane - * and the modifier was validated against the device-wides modifier list at FB + * and the modifier was validated against the device-wide modifier list at FB * creation time. */ static bool nv50_plane_format_mod_supported(struct drm_plane *plane, u32 format, u64 modifier) { struct nouveau_drm *drm = nouveau_drm(plane->dev); + const struct drm_format_info *info = drm_format_info(format); uint8_t i; /* All chipsets can display all formats in linear layout */ @@ -800,13 +801,32 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane, return true; if (drm->client.device.info.chipset < 0xc0) { - const struct drm_format_info *info = drm_format_info(format); const uint8_t kind = (modifier >> 12) & 0xff; if (!format) return false; for (i = 0; i < info->num_planes; i++) if ((info->cpp[i] != 4) && kind != 0x70) return false; + } else if (drm->client.device.info.chipset >= 0x1b2) { + const uint8_t slayout = ((modifier >> 22) & 0x1) | + ((modifier >> 25) & 0x6); + + if (!format) + return false; + + /* + * Note in practice this implies only formats where cpp is equal + * for each plane, or >= 4 for all planes, are supported. 
+ */ + for (i = 0; i < info->num_planes; i++) { + if (((info->cpp[i] == 2) && slayout != 3) || + ((info->cpp[i] == 1) && slayout != 2) || + ((info->cpp[i] >= 4) && slayout != 1)) + return false; + + /* 24-bit not supported. It has yet another layout */ + WARN_ON(info->cpp[i] == 3); + } } return true; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c index 0d8e9a9d1a57..2cec8cfbd546 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c @@ -179,6 +179,39 @@ wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) return 0; } +/**************************************************************** + * Log2(block height) ----------------------------+ * + * Page Kind ----------------------------------+ | * + * Gob Height/Page Kind Generation ------+ | | * + * Sector layout -------+ | | | * + * Compression ------+ | | | | */ +const u64 wndwca7e_modifiers[] = { /* | | | | | */ + /* 4cpp+ modifiers */ + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5), + /* 1cpp/8bpp modifiers */ + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 0), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 1), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 2), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 3), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 4), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 5), + /* 2cpp/16bpp modifiers */ + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 0), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 1), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 2), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 3), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 4), + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 5), + /* All formats support linear */ + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + static const struct nv50_wndw_func wndwca7e = { .acquire = wndwc37e_acquire, diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h index 8b58b668fc0c..c78ab11ec3ac 100644 --- a/drivers/gpu/drm/nouveau/include/nvfw/hs.h +++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h @@ -52,7 +52,9 @@ struct nvfw_hs_load_header_v2 { struct { u32 offset; u32 size; - } app[]; + u32 data_offset; + u32 data_size; + } app[] __counted_by(num_apps); }; const struct nvfw_hs_load_header_v2 *nvfw_hs_load_header_v2(struct nvkm_subdev *, const void *); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h index 22f74fc88cd7..57bc542780bb 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h @@ -9,6 +9,8 @@ struct nvkm_device_tegra { struct nvkm_device device; struct platform_device *pdev; + void __iomem *regs; + struct reset_control *rst; struct clk *clk; struct clk *clk_ref; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h index d5d8877064a7..6a09d397c651 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h @@ -134,4 +134,5 @@ int 
gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int gp10b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index d59fd12268b9..6c26beeb427f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -57,7 +57,7 @@ nouveau_bo(struct ttm_buffer_object *bo) static inline void nouveau_bo_fini(struct nouveau_bo *bo) { - ttm_bo_put(&bo->bo); + ttm_bo_fini(&bo->bo); } extern struct ttm_device_funcs nouveau_bo_driver; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 805d0a87aa54..00515623a2cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -30,6 +30,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> @@ -764,7 +765,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) { struct nouveau_display *disp = nouveau_display(dev); - drm_client_dev_suspend(dev, false); + drm_client_dev_suspend(dev); if (drm_drv_uses_atomic_modeset(dev)) { if (!runtime) { @@ -795,7 +796,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime) } } - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); } int @@ -807,9 +808,9 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, uint32_t domain; int ret; - args->pitch = roundup(args->width * (args->bpp / 8), 256); - args->size = args->pitch * args->height; - args->size = roundup(args->size, PAGE_SIZE); + ret = drm_mode_size_dumb(dev, args, SZ_256, 0); + if (ret) + return ret; /* Use VRAM if there is any ; otherwise fallback to system memory */ if (nouveau_drm(dev)->client.device.info.ram_size != 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 55abc510067b..0e409414f44d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -10,7 +10,7 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 4 -#define DRIVER_PATCHLEVEL 0 +#define DRIVER_PATCHLEVEL 1 /* * 1.1.1: @@ -35,6 +35,8 @@ * programs that get directly linked with NVKM. 
* 1.3.1: * - implemented limited ABI16/NVIF interop + * 1.4.1: + * - add variable page sizes and compression for Turing+ */ #include <linux/notifier.h> @@ -49,6 +51,7 @@ #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 690e10fbf0bd..395d92ab6271 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -87,7 +87,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem) return; } - ttm_bo_put(&nvbo->bo); + ttm_bo_fini(&nvbo->bo); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 8d5853deeee4..9fd351273236 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -21,6 +21,8 @@ */ #include "nouveau_platform.h" +#include <nvkm/subdev/clk/gk20a_devfreq.h> + static int nouveau_platform_probe(struct platform_device *pdev) { const struct nvkm_device_tegra_func *func; @@ -40,6 +42,21 @@ static void nouveau_platform_remove(struct platform_device *pdev) nouveau_drm_device_remove(drm); } +#ifdef CONFIG_PM_SLEEP +static int nouveau_platform_suspend(struct device *dev) +{ + return gk20a_devfreq_suspend(dev); +} + +static int nouveau_platform_resume(struct device *dev) +{ + return gk20a_devfreq_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(nouveau_pm_ops, nouveau_platform_suspend, + nouveau_platform_resume); +#endif + #if IS_ENABLED(CONFIG_OF) static const struct nvkm_device_tegra_func gk20a_platform_data = { .iommu_bit = 34, @@ -81,6 +98,9 @@ struct platform_driver nouveau_platform_driver = { .driver = { .name = "nouveau", .of_match_table = of_match_ptr(nouveau_platform_match), +#ifdef CONFIG_PM_SLEEP + .pm = &nouveau_pm_ops, +#endif }, .probe = nouveau_platform_probe, .remove = nouveau_platform_remove, diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 7d2436e5d50d..0a55babdf667 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -302,8 +302,10 @@ nouveau_ttm_init(struct nouveau_drm *drm) ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev, dev->anon_inode->i_mapping, dev->vma_offset_manager, - drm_need_swiotlb(drm->client.mmu.dmabits), - drm->client.mmu.dmabits <= 32); + (drm_need_swiotlb(drm->client.mmu.dmabits) ? + TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) | + (drm->client.mmu.dmabits <= 32 ? 
+ TTM_ALLOCATION_POOL_USE_DMA32 : 0)); if (ret) { NV_ERROR(drm, "error initialising bo driver, %d\n", ret); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 79eefdfd08a2..f10809115c56 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -107,34 +107,34 @@ nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm, static int nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm, - u64 addr, u64 range) + u64 addr, u64 range, u8 page_shift) { struct nvif_vmm *vmm = &uvmm->vmm.vmm; - return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT); + return nvif_vmm_raw_get(vmm, addr, range, page_shift); } static int nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm, - u64 addr, u64 range) + u64 addr, u64 range, u8 page_shift) { struct nvif_vmm *vmm = &uvmm->vmm.vmm; - return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT); + return nvif_vmm_raw_put(vmm, addr, range, page_shift); } static int nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm, - u64 addr, u64 range, bool sparse) + u64 addr, u64 range, u8 page_shift, bool sparse) { struct nvif_vmm *vmm = &uvmm->vmm.vmm; - return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse); + return nvif_vmm_raw_unmap(vmm, addr, range, page_shift, sparse); } static int nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm, - u64 addr, u64 range, + u64 addr, u64 range, u8 page_shift, u64 bo_offset, u8 kind, struct nouveau_mem *mem) { @@ -163,7 +163,7 @@ nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm, return -ENOSYS; } - return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT, + return nvif_vmm_raw_map(vmm, addr, range, page_shift, &args, argc, &mem->mem, bo_offset); } @@ -182,8 +182,9 @@ nouveau_uvma_vmm_put(struct nouveau_uvma *uvma) { u64 addr = uvma->va.va.addr; u64 range = uvma->va.va.range; + u8 page_shift = uvma->page_shift; - return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range); + return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range, page_shift); } static int @@ -193,9 +194,11 @@ nouveau_uvma_map(struct nouveau_uvma *uvma, u64 addr = uvma->va.va.addr; u64 offset = uvma->va.gem.offset; u64 range = uvma->va.va.range; + u8 page_shift = uvma->page_shift; return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range, - offset, uvma->kind, mem); + page_shift, offset, uvma->kind, + mem); } static int @@ -203,12 +206,13 @@ nouveau_uvma_unmap(struct nouveau_uvma *uvma) { u64 addr = uvma->va.va.addr; u64 range = uvma->va.va.range; + u8 page_shift = uvma->page_shift; bool sparse = !!uvma->region; if (drm_gpuva_invalidated(&uvma->va)) return 0; - return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse); + return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse); } static int @@ -450,6 +454,62 @@ op_unmap_prepare_unwind(struct drm_gpuva *va) drm_gpuva_insert(va->vm, va); } +static bool +op_map_aligned_to_page_shift(const struct drm_gpuva_op_map *op, u8 page_shift) +{ + u64 non_page_bits = (1ULL << page_shift) - 1; + + return (op->va.addr & non_page_bits) == 0 && + (op->va.range & non_page_bits) == 0 && + (op->gem.offset & non_page_bits) == 0; +} + +static u8 +select_page_shift(struct nouveau_uvmm *uvmm, struct drm_gpuva_op_map *op) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(op->gem.obj); + + /* nouveau_bo_fixup_align() guarantees that the page size will be aligned + * for most cases, but it can't handle cases where userspace allocates with + * a size and then binds with a smaller granularity. 
So in order to avoid + * breaking old userspace, we need to ensure that the VA is actually + * aligned before using it, and if it isn't, then we downgrade to the first + * granularity that will fit, which is optimal from a correctness and + * performance perspective. + */ + if (op_map_aligned_to_page_shift(op, nvbo->page)) + return nvbo->page; + + struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource); + struct nvif_vmm *vmm = &uvmm->vmm.vmm; + int i; + + /* If the given granularity doesn't fit, let's find one that will fit. */ + for (i = 0; i < vmm->page_nr; i++) { + /* Ignore anything that is bigger or identical to the BO preference. */ + if (vmm->page[i].shift >= nvbo->page) + continue; + + /* Skip incompatible domains. */ + if ((mem->mem.type & NVIF_MEM_VRAM) && !vmm->page[i].vram) + continue; + if ((mem->mem.type & NVIF_MEM_HOST) && + (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) + continue; + + /* If it fits, return the proposed shift. */ + if (op_map_aligned_to_page_shift(op, vmm->page[i].shift)) + return vmm->page[i].shift; + } + + /* If we get here then nothing can reconcile the requirements. This should never + * happen. + */ + drm_WARN_ONCE(op->gem.obj->dev, 1, "Could not find an appropriate page size.\n"); + + return PAGE_SHIFT; +} + static void nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, struct nouveau_uvma_prealloc *new, @@ -501,7 +561,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, if (vmm_get_range) nouveau_uvmm_vmm_put(uvmm, vmm_get_start, - vmm_get_range); + vmm_get_range, + select_page_shift(uvmm, &op->map)); break; } case DRM_GPUVA_OP_REMAP: { @@ -528,6 +589,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, u64 ustart = va->va.addr; u64 urange = va->va.range; u64 uend = ustart + urange; + u8 page_shift = uvma_from_va(va)->page_shift; /* Nothing to do for mappings we merge with. 
*/ if (uend == vmm_get_start || @@ -538,7 +600,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, u64 vmm_get_range = ustart - vmm_get_start; nouveau_uvmm_vmm_put(uvmm, vmm_get_start, - vmm_get_range); + vmm_get_range, + page_shift); } vmm_get_start = uend; break; @@ -592,6 +655,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm, uvma->region = args->region; uvma->kind = args->kind; + uvma->page_shift = select_page_shift(uvmm, op); drm_gpuva_map(&uvmm->base, &uvma->va, op); @@ -633,7 +697,8 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, if (vmm_get_range) { ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start, - vmm_get_range); + vmm_get_range, + new->map->page_shift); if (ret) { op_map_prepare_unwind(new->map); goto unwind; @@ -689,6 +754,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, u64 ustart = va->va.addr; u64 urange = va->va.range; u64 uend = ustart + urange; + u8 page_shift = uvma_from_va(va)->page_shift; op_unmap_prepare(u); @@ -704,7 +770,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, u64 vmm_get_range = ustart - vmm_get_start; ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start, - vmm_get_range); + vmm_get_range, page_shift); if (ret) { op_unmap_prepare_unwind(va); goto unwind; @@ -799,10 +865,11 @@ op_unmap_range(struct drm_gpuva_op_unmap *u, u64 addr, u64 range) { struct nouveau_uvma *uvma = uvma_from_va(u->va); + u8 page_shift = uvma->page_shift; bool sparse = !!uvma->region; if (!drm_gpuva_invalidated(u->va)) - nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse); + nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse); } static void @@ -882,6 +949,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm, struct drm_gpuva_op_map *n = r->next; struct drm_gpuva *va = r->unmap->va; struct nouveau_uvma *uvma = uvma_from_va(va); + u8 page_shift = uvma->page_shift; if (unmap) { u64 addr = va->va.addr; @@ -893,7 +961,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm, if (n) end = n->va.addr; - nouveau_uvmm_vmm_put(uvmm, addr, end - addr); + nouveau_uvmm_vmm_put(uvmm, addr, end - addr, page_shift); } nouveau_uvma_gem_put(uvma); diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h index 9d3c348581eb..51925711ae90 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h @@ -33,6 +33,7 @@ struct nouveau_uvma { struct nouveau_uvma_region *region; u8 kind; + u8 page_shift; }; #define uvmm_from_gpuvm(x) container_of((x), struct nouveau_uvmm, base) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 3375a59ebf1a..2517b65d8faa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2280,6 +2280,7 @@ nv13b_chipset = { .acr = { 0x00000001, gp10b_acr_new }, .bar = { 0x00000001, gm20b_bar_new }, .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gp10b_clk_new }, .fault = { 0x00000001, gp10b_fault_new }, .fb = { 0x00000001, gp10b_fb_new }, .fuse = { 0x00000001, gm107_fuse_new }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index 114e50ca1827..03aa6f09ec89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -259,6 +259,10 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, tdev->func = func; tdev->pdev = pdev; + tdev->regs = devm_platform_ioremap_resource(pdev, 0); + if 
(IS_ERR(tdev->regs)) + return PTR_ERR(tdev->regs); + if (func->require_vdd) { tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); if (IS_ERR(tdev->vdd)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c index cac6d64ab67d..4e8b3f1c7e25 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c @@ -159,6 +159,8 @@ nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw) nvkm_memory_unref(&fw->inst); nvkm_falcon_fw_dtor_sigs(fw); nvkm_firmware_dtor(&fw->fw); + kfree(fw->boot); + fw->boot = NULL; } static const struct nvkm_firmware_func diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild index dcecd499d8df..be8f3283ee16 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild @@ -10,6 +10,8 @@ nvkm-y += nvkm/subdev/clk/gf100.o nvkm-y += nvkm/subdev/clk/gk104.o nvkm-y += nvkm/subdev/clk/gk20a.o nvkm-y += nvkm/subdev/clk/gm20b.o +nvkm-y += nvkm/subdev/clk/gp10b.o +nvkm-$(CONFIG_PM_DEVFREQ) += nvkm/subdev/clk/gk20a_devfreq.o nvkm-y += nvkm/subdev/clk/pllnv04.o nvkm-y += nvkm/subdev/clk/pllgt215.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c index d573fb0917fc..65f5d0f1f3bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c @@ -23,6 +23,7 @@ * */ #include "priv.h" +#include "gk20a_devfreq.h" #include "gk20a.h" #include <core/tegra.h> @@ -589,6 +590,10 @@ gk20a_clk_init(struct nvkm_clk *base) return ret; } + ret = gk20a_devfreq_init(base, &clk->devfreq); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h index 286413ff4a9e..ea5b0bab4cce 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h @@ -118,6 +118,7 @@ struct gk20a_clk { const struct gk20a_clk_pllg_params *params; struct gk20a_pll pll; u32 parent_rate; + struct gk20a_devfreq *devfreq; u32 (*div_to_pl)(u32); u32 (*pl_to_div)(u32); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c new file mode 100644 index 000000000000..41003cbcdbfa --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: MIT +#include <linux/clk.h> +#include <linux/math64.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> + +#include <drm/drm_managed.h> + +#include <subdev/clk.h> + +#include "nouveau_drv.h" +#include "nouveau_chan.h" +#include "priv.h" +#include "gk20a_devfreq.h" +#include "gk20a.h" +#include "gp10b.h" + +#define PMU_BUSY_CYCLES_NORM_MAX 1000U + +#define PWR_PMU_IDLE_COUNTER_TOTAL 0U +#define PWR_PMU_IDLE_COUNTER_BUSY 4U + +#define PWR_PMU_IDLE_COUNT_REG_OFFSET 0x0010A508U +#define PWR_PMU_IDLE_COUNT_REG_SIZE 16U +#define PWR_PMU_IDLE_COUNT_MASK 0x7FFFFFFFU +#define PWR_PMU_IDLE_COUNT_RESET_VALUE (0x1U << 31U) + +#define PWR_PMU_IDLE_INTR_REG_OFFSET 0x0010A9E8U +#define PWR_PMU_IDLE_INTR_ENABLE_VALUE 0U + +#define PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET 0x0010A9ECU +#define PWR_PMU_IDLE_INTR_STATUS_MASK 0x00000001U +#define PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE 0x1U + +#define PWR_PMU_IDLE_THRESHOLD_REG_OFFSET 0x0010A8A0U +#define PWR_PMU_IDLE_THRESHOLD_REG_SIZE 4U +#define PWR_PMU_IDLE_THRESHOLD_MAX_VALUE 0x7FFFFFFFU + +#define 
PWR_PMU_IDLE_CTRL_REG_OFFSET 0x0010A50CU +#define PWR_PMU_IDLE_CTRL_REG_SIZE 16U +#define PWR_PMU_IDLE_CTRL_VALUE_MASK 0x3U +#define PWR_PMU_IDLE_CTRL_VALUE_BUSY 0x2U +#define PWR_PMU_IDLE_CTRL_VALUE_ALWAYS 0x3U +#define PWR_PMU_IDLE_CTRL_FILTER_MASK (0x1U << 2) +#define PWR_PMU_IDLE_CTRL_FILTER_DISABLED 0x0U + +#define PWR_PMU_IDLE_MASK_REG_OFFSET 0x0010A504U +#define PWR_PMU_IDLE_MASK_REG_SIZE 16U +#define PWM_PMU_IDLE_MASK_GR_ENABLED 0x1U +#define PWM_PMU_IDLE_MASK_CE_2_ENABLED 0x200000U + +/** + * struct gk20a_devfreq - Device frequency management + */ +struct gk20a_devfreq { + /** @devfreq: devfreq device. */ + struct devfreq *devfreq; + + /** @regs: Device registers. */ + void __iomem *regs; + + /** @gov_data: Governor data. */ + struct devfreq_simple_ondemand_data gov_data; + + /** @busy_time: Busy time. */ + ktime_t busy_time; + + /** @total_time: Total time. */ + ktime_t total_time; + + /** @time_last_update: Last update time. */ + ktime_t time_last_update; +}; + +static struct gk20a_devfreq *dev_to_gk20a_devfreq(struct device *dev) +{ + struct nouveau_drm *drm = dev_get_drvdata(dev); + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); + struct nvkm_clk *base = nvkm_clk(subdev); + + switch (drm->nvkm->chipset) { + case 0x13b: return gp10b_clk(base)->devfreq; break; + default: return gk20a_clk(base)->devfreq; break; + } +} + +static void gk20a_pmu_init_perfmon_counter(struct gk20a_devfreq *gdevfreq) +{ + u32 data; + + // Set pmu idle intr status bit on total counter overflow + writel(PWR_PMU_IDLE_INTR_ENABLE_VALUE, + gdevfreq->regs + PWR_PMU_IDLE_INTR_REG_OFFSET); + + writel(PWR_PMU_IDLE_THRESHOLD_MAX_VALUE, + gdevfreq->regs + PWR_PMU_IDLE_THRESHOLD_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_THRESHOLD_REG_SIZE)); + + // Setup counter for total cycles + data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE)); + data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK); + data |= PWR_PMU_IDLE_CTRL_VALUE_ALWAYS | PWR_PMU_IDLE_CTRL_FILTER_DISABLED; + writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE)); + + // Setup counter for busy cycles + writel(PWM_PMU_IDLE_MASK_GR_ENABLED | PWM_PMU_IDLE_MASK_CE_2_ENABLED, + gdevfreq->regs + PWR_PMU_IDLE_MASK_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_MASK_REG_SIZE)); + + data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE)); + data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK); + data |= PWR_PMU_IDLE_CTRL_VALUE_BUSY | PWR_PMU_IDLE_CTRL_FILTER_DISABLED; + writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE)); +} + +static u32 gk20a_pmu_read_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id) +{ + u32 ret; + + ret = readl(gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET + + (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE)); + + return ret & PWR_PMU_IDLE_COUNT_MASK; +} + +static void gk20a_pmu_reset_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id) +{ + writel(PWR_PMU_IDLE_COUNT_RESET_VALUE, gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET + + (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE)); +} + +static u32 gk20a_pmu_read_idle_intr_status(struct gk20a_devfreq *gdevfreq) +{ + u32 ret; + + ret = readl(gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET); + + return ret & 
PWR_PMU_IDLE_INTR_STATUS_MASK; +} + +static void gk20a_pmu_clear_idle_intr_status(struct gk20a_devfreq *gdevfreq) +{ + writel(PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE, + gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET); +} + +static void gk20a_devfreq_update_utilization(struct gk20a_devfreq *gdevfreq) +{ + ktime_t now, last; + u64 busy_cycles, total_cycles; + u32 norm, intr_status; + + now = ktime_get(); + last = gdevfreq->time_last_update; + gdevfreq->total_time = ktime_us_delta(now, last); + + busy_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); + total_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); + intr_status = gk20a_pmu_read_idle_intr_status(gdevfreq); + + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); + + if (intr_status != 0UL) { + norm = PMU_BUSY_CYCLES_NORM_MAX; + gk20a_pmu_clear_idle_intr_status(gdevfreq); + } else if (total_cycles == 0ULL || busy_cycles > total_cycles) { + norm = PMU_BUSY_CYCLES_NORM_MAX; + } else { + norm = (u32)div64_u64(busy_cycles * PMU_BUSY_CYCLES_NORM_MAX, + total_cycles); + } + + gdevfreq->busy_time = div_u64(gdevfreq->total_time * norm, PMU_BUSY_CYCLES_NORM_MAX); + gdevfreq->time_last_update = now; +} + +static int gk20a_devfreq_target(struct device *dev, unsigned long *freq, + u32 flags) +{ + struct nouveau_drm *drm = dev_get_drvdata(dev); + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); + struct nvkm_clk *base = nvkm_clk(subdev); + struct nvkm_pstate *pstates = base->func->pstates; + int nr_pstates = base->func->nr_pstates; + int i, ret; + + for (i = 0; i < nr_pstates - 1; i++) + if (pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV >= *freq) + break; + + ret = nvkm_clk_ustate(base, pstates[i].pstate, 0); + ret |= nvkm_clk_ustate(base, pstates[i].pstate, 1); + if (ret) { + nvkm_error(subdev, "cannot update clock\n"); + return ret; + } + + *freq = pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV; + + return 0; +} + +static int gk20a_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct nouveau_drm *drm = dev_get_drvdata(dev); + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); + struct nvkm_clk *base = nvkm_clk(subdev); + + *freq = nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV; + + return 0; +} + +static void gk20a_devfreq_reset(struct gk20a_devfreq *gdevfreq) +{ + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); + gk20a_pmu_clear_idle_intr_status(gdevfreq); + + gdevfreq->busy_time = 0; + gdevfreq->total_time = 0; + gdevfreq->time_last_update = ktime_get(); +} + +static int gk20a_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *status) +{ + struct nouveau_drm *drm = dev_get_drvdata(dev); + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); + + gk20a_devfreq_get_cur_freq(dev, &status->current_frequency); + + gk20a_devfreq_update_utilization(gdevfreq); + + status->busy_time = ktime_to_ns(gdevfreq->busy_time); + status->total_time = ktime_to_ns(gdevfreq->total_time); + + gk20a_devfreq_reset(gdevfreq); + + NV_DEBUG(drm, "busy %lu total %lu %lu %% freq %lu MHz\n", + status->busy_time, status->total_time, + status->busy_time / (status->total_time / 100), + status->current_frequency / 1000 / 1000); + + return 0; +} + +static struct devfreq_dev_profile gk20a_devfreq_profile = { + 
.timer = DEVFREQ_TIMER_DELAYED, + .polling_ms = 50, + .target = gk20a_devfreq_target, + .get_cur_freq = gk20a_devfreq_get_cur_freq, + .get_dev_status = gk20a_devfreq_get_dev_status, +}; + +int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **gdevfreq) +{ + struct nvkm_device *device = base->subdev.device; + struct nouveau_drm *drm = dev_get_drvdata(device->dev); + struct nvkm_device_tegra *tdev = device->func->tegra(device); + struct nvkm_pstate *pstates = base->func->pstates; + int nr_pstates = base->func->nr_pstates; + struct gk20a_devfreq *new_gdevfreq; + int i; + + new_gdevfreq = drmm_kzalloc(drm->dev, sizeof(struct gk20a_devfreq), GFP_KERNEL); + if (!new_gdevfreq) + return -ENOMEM; + + new_gdevfreq->regs = tdev->regs; + + for (i = 0; i < nr_pstates; i++) + dev_pm_opp_add(base->subdev.device->dev, + pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV, 0); + + gk20a_pmu_init_perfmon_counter(new_gdevfreq); + gk20a_devfreq_reset(new_gdevfreq); + + gk20a_devfreq_profile.initial_freq = + nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV; + + new_gdevfreq->gov_data.upthreshold = 45; + new_gdevfreq->gov_data.downdifferential = 5; + + new_gdevfreq->devfreq = devm_devfreq_add_device(device->dev, + &gk20a_devfreq_profile, + DEVFREQ_GOV_SIMPLE_ONDEMAND, + &new_gdevfreq->gov_data); + if (IS_ERR(new_gdevfreq->devfreq)) + return PTR_ERR(new_gdevfreq->devfreq); + + *gdevfreq = new_gdevfreq; + + return 0; +} + +int gk20a_devfreq_resume(struct device *dev) +{ + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); + + if (!gdevfreq || !gdevfreq->devfreq) + return 0; + + return devfreq_resume_device(gdevfreq->devfreq); +} + +int gk20a_devfreq_suspend(struct device *dev) +{ + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); + + if (!gdevfreq || !gdevfreq->devfreq) + return 0; + + return devfreq_suspend_device(gdevfreq->devfreq); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h new file mode 100644 index 000000000000..5b7ca8a7a5cd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __GK20A_DEVFREQ_H__ +#define __GK20A_DEVFREQ_H__ + +#include <linux/devfreq.h> + +struct gk20a_devfreq; + +#if defined(CONFIG_PM_DEVFREQ) +int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq); + +int gk20a_devfreq_resume(struct device *dev); +int gk20a_devfreq_suspend(struct device *dev); +#else +static inline int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq) +{ + return 0; +} + +static inline int gk20a_devfreq_resume(struct device *dev) { return 0; } +static inline int gk20a_devfreq_suspend(struct device *dev) { return 0; } +#endif /* CONFIG_PM_DEVFREQ */ + +#endif /* __GK20A_DEVFREQ_H__ */ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c index 7c33542f651b..fa8ca53acbd1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c @@ -27,6 +27,7 @@ #include <core/tegra.h> #include "priv.h" +#include "gk20a_devfreq.h" #include "gk20a.h" #define GPCPLL_CFG_SYNC_MODE BIT(2) @@ -869,6 +870,10 @@ gm20b_clk_init(struct nvkm_clk *base) return ret; } + ret = gk20a_devfreq_init(base, &clk->devfreq); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c new file mode 
100644 index 000000000000..492b62c0ee96 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: MIT +#include <subdev/clk.h> +#include <subdev/timer.h> +#include <core/device.h> +#include <core/tegra.h> + +#include "priv.h" +#include "gk20a_devfreq.h" +#include "gk20a.h" +#include "gp10b.h" + +static int +gp10b_clk_init(struct nvkm_clk *base) +{ + struct gp10b_clk *clk = gp10b_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + int ret; + + /* Start with the highest frequency, matching the BPMP default */ + base->func->calc(base, &base->func->pstates[base->func->nr_pstates - 1].base); + ret = base->func->prog(base); + if (ret) { + nvkm_error(subdev, "cannot initialize clock\n"); + return ret; + } + + ret = gk20a_devfreq_init(base, &clk->devfreq); + if (ret) + return ret; + + return 0; +} + +static int +gp10b_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct gp10b_clk *clk = gp10b_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + + switch (src) { + case nv_clk_src_gpc: + return clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV; + default: + nvkm_error(subdev, "invalid clock source %d\n", src); + return -EINVAL; + } +} + +static int +gp10b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gp10b_clk *clk = gp10b_clk(base); + u32 target_rate = cstate->domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV; + + clk->new_rate = clk_round_rate(clk->clk, target_rate) / GK20A_CLK_GPC_MDIV; + + return 0; +} + +static int +gp10b_clk_prog(struct nvkm_clk *base) +{ + struct gp10b_clk *clk = gp10b_clk(base); + int ret; + + ret = clk_set_rate(clk->clk, clk->new_rate * GK20A_CLK_GPC_MDIV); + if (ret < 0) + return ret; + + clk->rate = clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV; + + return 0; +} + +static struct nvkm_pstate +gp10b_pstates[] = { + { + .base = { + .domain[nv_clk_src_gpc] = 114750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 216750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 318750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 420750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 522750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 624750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 726750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 828750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 930750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 1032750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 1134750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 1236750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 1300500, + }, + }, +}; + +static const struct nvkm_clk_func +gp10b_clk = { + .init = gp10b_clk_init, + .read = gp10b_clk_read, + .calc = gp10b_clk_calc, + .prog = gp10b_clk_prog, + .tidy = gk20a_clk_tidy, + .pstates = gp10b_pstates, + .nr_pstates = ARRAY_SIZE(gp10b_pstates), + .domains = { + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, + { nv_clk_src_max } + } +}; + +int +gp10b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct nvkm_device_tegra *tdev = device->func->tegra(device); + const struct nvkm_clk_func *func = &gp10b_clk; + struct gp10b_clk *clk; + int ret, i; + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) + return -ENOMEM; + *pclk = &clk->base; + clk->clk = tdev->clk; + + /* Finish initializing the pstates */ + for (i = 0; i < func->nr_pstates; i++) { + INIT_LIST_HEAD(&func->pstates[i].list); + 
func->pstates[i].pstate = i + 1; + } + + ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h new file mode 100644 index 000000000000..178e3bcdbbf7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_CLK_GP10B_H__ +#define __NVKM_CLK_GP10B_H__ + +struct gp10b_clk { + /* currently applied parameters */ + struct nvkm_clk base; + struct gk20a_devfreq *devfreq; + struct clk *clk; + u32 rate; + + /* new parameters to apply */ + u32 new_rate; +}; + +#define gp10b_clk(p) container_of((p), struct gp10b_clk, base) + +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index 8a286a9349ac..7ce1b65e2c1c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -279,7 +279,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device, mutex_init(&fb->tags.mutex); if (func->sysmem.flush_page_init) { - fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + fb->sysmem.flush_page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (!fb->sysmem.flush_page) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c index 1c78c8853617..170776cc82fb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c @@ -15,6 +15,9 @@ gb100_fb_sysmem_flush_page_init(struct nvkm_fb *fb) const u32 hshub = DRF_LO(NV_PFB_HSHUB0); struct nvkm_device *device = fb->subdev.device; + // Ensure that the address is within hardware limits + WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(52)); + nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi); nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo); nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c index 848505026d02..a21bf19e1041 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c @@ -13,6 +13,9 @@ gb202_fb_sysmem_flush_page_init(struct nvkm_fb *fb) struct nvkm_device *device = fb->subdev.device; const u64 addr = fb->sysmem.flush_page_addr; + // Ensure that the address is within hardware limits + WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(52)); + nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr)); nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr)); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c index 07db9b397ac1..64281a09fb39 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c @@ -80,6 +80,9 @@ gf100_fb_init_page(struct nvkm_fb *fb) void gf100_fb_sysmem_flush_page_init(struct nvkm_fb *fb) { + // Ensure that the address can actually fit in the register + WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(40)); + nvkm_wr32(fb->subdev.device, 0x100c10, fb->sysmem.flush_page_addr >> 8); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c index 2d8c51f882d5..8c9394048f25 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c @@ -13,6 +13,9 @@ gh100_fb_sysmem_flush_page_init(struct nvkm_fb *fb) const u64 addr = fb->sysmem.flush_page_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT; struct nvkm_device *device = fb->subdev.device; + // Ensure that the address is within hardware limits + WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(52)); + nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr)); nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr)); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c index a6efbd913c13..076d968b7297 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c @@ -214,6 +214,9 @@ nv50_fb_tags(struct nvkm_fb *base) static void nv50_fb_sysmem_flush_page_init(struct nvkm_fb *fb) { + // Ensure that the address can actually fit in the register + WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(40)); + nvkm_wr32(fb->subdev.device, 0x100c08, fb->sysmem.flush_page_addr >> 8); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index 851fd847a2a9..ed15a4475181 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -21,9 +21,7 @@ */ #include "vmm.h" -#include <core/client.h> #include <subdev/fb.h> -#include <subdev/ltc.h> #include <subdev/timer.h> #include <engine/gr.h> @@ -111,13 +109,33 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, nvkm_done(pt->memory); } +static inline u64 +gp100_vmm_comptag_nr(u64 size) +{ + return size >> 16; /* One comptag per 64KiB VRAM. */ +} + +static inline u64 +gp100_vmm_pte_comptagline_base(u64 addr) +{ + /* RM allocates enough comptags for all of VRAM, so use a 1:1 mapping. */ + return (1 + gp100_vmm_comptag_nr(addr)) << 36; /* NV_MMU_VER2_PTE_COMPTAGLINE */ +} + +static inline u64 +gp100_vmm_pte_comptagline_incr(u32 page_size) +{ + return gp100_vmm_comptag_nr(page_size) << 36; /* NV_MMU_VER2_PTE_COMPTAGLINE */ +} + static inline void gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) { u64 data = (addr >> 4) | map->type; - map->type += ptes * map->ctag; + if (map->ctag) + data |= gp100_vmm_pte_comptagline_base(addr); while (ptes--) { VMM_WO064(pt, vmm, ptei++ * 8, data); @@ -142,7 +160,6 @@ gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, while (ptes--) { const u64 data = (*map->dma++ >> 4) | map->type; VMM_WO064(pt, vmm, ptei++ * 8, data); - map->type += map->ctag; } nvkm_done(pt->memory); return; @@ -200,7 +217,8 @@ gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, { u64 data = (addr >> 4) | map->type; - map->type += ptes * map->ctag; + if (map->ctag) + data |= gp100_vmm_pte_comptagline_base(addr); while (ptes--) { VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL); @@ -411,8 +429,6 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, struct gp100_vmm_map_vn vn; struct gp100_vmm_map_v0 v0; } *args = argv; - struct nvkm_device *device = vmm->mmu->subdev.device; - struct nvkm_memory *memory = map->memory; u8 kind, kind_inv, priv, ro, vol; int kindn, aper, ret = -ENOSYS; const u8 *kindm; @@ -449,29 +465,24 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, return -EINVAL; } + /* Handle compression. 
*/ if (kindm[kind] != kind) { - u64 tags = nvkm_memory_size(memory) >> 16; - if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) { - VMM_DEBUG(vmm, "comp %d %02x", aper, page->type); - return -EINVAL; - } - - if (!map->no_comp) { - ret = nvkm_memory_tags_get(memory, device, tags, - nvkm_ltc_tags_clear, - &map->tags); - if (ret) { - VMM_DEBUG(vmm, "comp %d", ret); - return ret; + struct nvkm_device *device = vmm->mmu->subdev.device; + + /* Compression is only supported when using GSP-RM, as + * PMU firmware is required in order to initialise the + * compbit backing store. + */ + if (nvkm_gsp_rm(device->gsp)) { + /* Turing GPUs require PTE_COMPTAGLINE to be filled, + * in addition to specifying a compressed kind. + */ + if (device->card_type < GA100) { + map->ctag = gp100_vmm_pte_comptagline_incr(1 << map->page->shift); + map->next |= map->ctag; } - } - - if (!map->no_comp && map->tags->mn) { - tags = map->tags->mn->offset + (map->offset >> 16); - map->ctag |= ((1ULL << page->shift) >> 16) << 36; - map->type |= tags << 36; - map->next |= map->ctag; } else { + /* Revert to non-compressed kind. */ kind = kindm[kind]; } } @@ -592,8 +603,8 @@ gp100_vmm = { { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx }, { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx }, { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx }, - { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC }, - { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC }, + { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxx }, + { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxx }, { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx }, {} } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c index e081239afe58..5791d134962b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c @@ -34,8 +34,8 @@ gp10b_vmm = { { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx }, { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx }, { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx }, - { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHC }, - { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHC }, + { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHx }, + { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHx }, { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SxHx }, {} } diff --git a/drivers/gpu/drm/nova/Kconfig b/drivers/gpu/drm/nova/Kconfig index cca6a3fea879..3e637ad7b5ba 100644 --- a/drivers/gpu/drm/nova/Kconfig +++ b/drivers/gpu/drm/nova/Kconfig @@ -1,9 +1,11 @@ config DRM_NOVA tristate "Nova DRM driver" + depends on 64BIT depends on DRM=y depends on PCI depends on RUST select AUXILIARY_BUS + select NOVA_CORE default n help Choose this if you want to build the Nova DRM driver for Nvidia diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 63ddc5127f7b..1c2a1920c0a6 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -10,6 +10,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_mode.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "omap_drv.h" diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index a3d470468e5b..9edc1b3f9f95 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c @@ -11,6 +11,7 @@ #include <drm/drm_file.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include "omap_drv.h" #include "omap_dmm_tiler.h" diff 
--git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 3fff32c000a6..bbe427ab43c1 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -26,6 +26,8 @@ #include <linux/vmalloc.h> #include <linux/wait.h> +#include <drm/drm_print.h> + #include "omap_dmm_tiler.h" #include "omap_dmm_priv.h" diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 794267f0f007..1b96343226a5 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -19,6 +19,7 @@ #include <drm/drm_ioctl.h> #include <drm/drm_panel.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 4dd05bc732da..195715b162e3 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -77,7 +77,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, struct omap_dss_device *output = omap_encoder->output; struct drm_device *dev = encoder->dev; struct drm_connector *connector; - struct drm_bridge *bridge; struct videomode vm = { 0 }; u32 bus_flags; @@ -97,8 +96,7 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, * * A better solution is to use DRM's bus-flags through the whole driver. */ - for (bridge = output->bridge; bridge; - bridge = drm_bridge_get_next_bridge(bridge)) { + drm_for_each_bridge_in_chain_from(output->bridge, bridge) { if (!bridge->timings) continue; diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index bb3105556f19..b8c249ec1891 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -12,6 +12,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include "omap_dmm_tiler.h" #include "omap_drv.h" diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 948af7ec1130..ca3fb186bf19 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -15,6 +15,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/drm_util.h> #include "omap_drv.h" @@ -103,8 +104,6 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) drm_framebuffer_remove(fb); drm_client_release(&helper->client); - drm_fb_helper_unprepare(helper); - kfree(helper); } /* @@ -155,9 +154,9 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct drm_device *dev = helper->dev; struct omap_drm_private *priv = dev->dev_private; struct omap_fbdev *fbdev = priv->fbdev; + struct fb_info *fbi = helper->info; struct drm_framebuffer *fb = NULL; union omap_gem_size gsize; - struct fb_info *fbi = NULL; struct drm_mode_fb_cmd2 mode_cmd = {0}; struct drm_gem_object *bo; dma_addr_t dma_addr; @@ -226,13 +225,6 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto fail; } - fbi = drm_fb_helper_alloc_info(helper); - if (IS_ERR(fbi)) { - dev_err(dev->dev, "failed to allocate fb info\n"); - ret = PTR_ERR(fbi); - goto fail; - } - DBG("fbi=%p, dev=%p", fbi, dev); helper->funcs = &omap_fbdev_helper_funcs; diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 381552bfb409..71e79f53489a 100644 --- 
a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -10,7 +10,9 @@ #include <linux/spinlock.h> #include <linux/vmalloc.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_vma_manager.h> #include "omap_drv.h" @@ -580,15 +582,13 @@ static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - union omap_gem_size gsize; - - args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); - - args->size = PAGE_ALIGN(args->pitch * args->height); + union omap_gem_size gsize = { }; + int ret; - gsize = (union omap_gem_size){ - .bytes = args->size, - }; + ret = drm_mode_size_dumb(dev, args, SZ_8, 0); + if (ret) + return ret; + gsize.bytes = args->size; return omap_gem_new_handle(dev, file, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle); diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index a6f0bbc879d2..943c5307da00 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -5,6 +5,7 @@ */ #include <drm/drm_vblank.h> +#include <drm/drm_print.h> #include "omap_drv.h" diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c index fb97c74386f2..6fb7510cbebb 100644 --- a/drivers/gpu/drm/omapdrm/omap_overlay.c +++ b/drivers/gpu/drm/omapdrm/omap_overlay.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_print.h> #include "omap_dmm_tiler.h" #include "omap_drv.h" diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 24a2ded08b45..f9698890c989 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -10,6 +10,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include "omap_dmm_tiler.h" #include "omap_drv.h" @@ -229,7 +230,7 @@ static int omap_plane_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); /* we should have a crtc state if the plane is attached to a crtc */ if (WARN_ON(!crtc_state)) return 0; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 407c5f6a268b..76f6af819037 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -408,6 +408,19 @@ config DRM_PANEL_LG_LB035Q02 (found on the Gumstix Overo Palo35 board). To compile this driver as a module, choose M here. +config DRM_PANEL_LG_LD070WX3 + tristate "LG LD070WX3 MIPI DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select VIDEOMODE_HELPERS + help + Say Y here if you want to enable support for the LD070WX3 MIPI DSI + panel found in the NVIDIA Tegra Note 7 tablet. + + To compile this driver as a module, choose M here: the module will + be called panel-lg-ld070wx3. 
+ config DRM_PANEL_LG_LG4573 tristate "LG4573 RGB/SPI panel" depends on OF && SPI @@ -801,6 +814,19 @@ config DRM_PANEL_SAMSUNG_S6D7AA0 select DRM_MIPI_DSI select VIDEOMODE_HELPERS +config DRM_PANEL_SAMSUNG_S6E3FC2X01 + tristate "Samsung S6E3FC2X01 DSI panel controller" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select VIDEOMODE_HELPERS + help + Say Y or M here if you want to enable support for the + Samsung S6E3FC2 DDIC and connected MIPI DSI panel. + Currently supported panels: + + Samsung AMS641RW (found in the OnePlus 6T smartphone) + config DRM_PANEL_SAMSUNG_S6E3HA2 tristate "Samsung S6E3HA2 DSI video mode panel" depends on OF @@ -868,16 +894,17 @@ config DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01 DSI protocol with 4 lanes. config DRM_PANEL_SAMSUNG_SOFEF00 - tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels" + tristate "Samsung SOFEF00 DSI panel controller" depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE select VIDEOMODE_HELPERS help Say Y or M here if you want to enable support for the Samsung AMOLED - command mode panels found in the OnePlus 6/6T smartphones. + panel SOFEF00 DDIC and connected panel. + Currently supported panels: - The panels are 2280x1080@60Hz and 2340x1080@60Hz respectively + Samsung AMS628NW01 (found in OnePlus 6, 1080x2280@60Hz) config DRM_PANEL_SEIKO_43WVF1G tristate "Seiko 43WVF1G panel" @@ -888,6 +915,21 @@ config DRM_PANEL_SEIKO_43WVF1G Say Y here if you want to enable support for the Seiko 43WVF1G controller for 800x480 LCD panels +config DRM_PANEL_SHARP_LQ079L1SX01 + tristate "Sharp LQ079L1SX01 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select VIDEOMODE_HELPERS + help + Say Y here if you want to enable support for Sharp LQ079L1SX01 + TFT-LCD modules. The panel has a 2560x1600 resolution and uses + 24 bit RGB per pixel. It provides a dual MIPI DSI interface to + the host. + + To compile this driver as a module, choose M here: the module + will be called panel-sharp-lq079l1sx01. + config DRM_PANEL_SHARP_LQ101R1SX01 tristate "Sharp LQ101R1SX01 panel" depends on OF @@ -1045,6 +1087,17 @@ config DRM_PANEL_SYNAPTICS_R63353 Say Y if you want to enable support for panels based on the Synaptics R63353 controller. +config DRM_PANEL_SYNAPTICS_TDDI + tristate "Synaptics TDDI display panels" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y if you want to enable support for the Synaptics TDDI display + panels. There are multiple MIPI DSI panels manufactured under the TDDI + namesake, with varying resolutions and data lanes. They also have a + built-in LED backlight and a touch controller. 
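The new panel Kconfig entries above (LG LD070WX3, Samsung S6E3FC2X01, Sharp LQ079L1SX01, Synaptics TDDI) all describe MIPI DSI panels with a backlight, and the drivers they enable share the usual drm_panel/mipi_dsi_driver shape. The sketch below is only an illustration of that common skeleton under assumed placeholder names; it is not code from this series, and the driver name "example_panel", the compatible string and the mode timings are made up.

/*
 * Minimal, illustrative sketch of a MIPI DSI panel driver skeleton.
 * All names, timings and the compatible string are placeholders.
 */
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

struct example_panel {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct gpio_desc *reset_gpio;
};

/* Placeholder timings; a real driver takes these from the panel datasheet. */
static const struct drm_display_mode example_mode = {
	.clock = 66700,
	.hdisplay = 800, .hsync_start = 832, .hsync_end = 848, .htotal = 880,
	.vdisplay = 1280, .vsync_start = 1288, .vsync_end = 1292, .vtotal = 1300,
	.width_mm = 94, .height_mm = 151,
};

static int example_panel_get_modes(struct drm_panel *panel,
				   struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &example_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	return 1;	/* number of modes added */
}

static const struct drm_panel_funcs example_panel_funcs = {
	/* .prepare/.unprepare would power the panel and send the DCS sequences. */
	.get_modes = example_panel_get_modes,
};

static int example_panel_probe(struct mipi_dsi_device *dsi)
{
	struct example_panel *ctx;
	int ret;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dsi = dsi;
	ctx->reset_gpio = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio))
		return PTR_ERR(ctx->reset_gpio);

	/* Per-panel DSI parameters: lane count, pixel format, video/command mode. */
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
	mipi_dsi_set_drvdata(dsi, ctx);

	drm_panel_init(&ctx->panel, &dsi->dev, &example_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	/* Picks up the backlight referenced by the panel's OF node. */
	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret)
		drm_panel_remove(&ctx->panel);

	return ret;
}

static const struct of_device_id example_panel_of_match[] = {
	{ .compatible = "vendor,example-panel" },	/* placeholder */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_panel_of_match);

static struct mipi_dsi_driver example_panel_driver = {
	.probe = example_panel_probe,
	.driver = {
		.name = "panel-example",
		.of_match_table = example_panel_of_match,
	},
};
module_mipi_dsi_driver(example_panel_driver);
MODULE_LICENSE("GPL");

A complete driver of this kind also implements .prepare/.unprepare (regulator and reset handling plus the panel's DCS init and sleep sequences) and a .remove hook that calls mipi_dsi_detach() and drm_panel_remove(), which is where the per-panel differences between the drivers added here mostly live.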
+ config DRM_PANEL_TDO_TL070WSH30 tristate "TDO TL070WSH30 DSI panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 3615a761b44f..b9562a6fdcb3 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o obj-$(CONFIG_DRM_PANEL_LINCOLNTECH_LCD197) += panel-lincolntech-lcd197.o obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o +obj-$(CONFIG_DRM_PANEL_LG_LD070WX3) += panel-lg-ld070wx3.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o @@ -79,6 +80,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FC2X01) += panel-samsung-s6e3fc2x01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA8) += panel-samsung-s6e3ha8.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o @@ -91,6 +93,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01) += panel-samsung-s6e8aa5x01-ams561ra01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o +obj-$(CONFIG_DRM_PANEL_SHARP_LQ079L1SX01) += panel-sharp-lq079l1sx01.o obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o @@ -100,6 +103,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o obj-$(CONFIG_DRM_PANEL_SUMMIT) += panel-summit.o obj-$(CONFIG_DRM_PANEL_SYNAPTICS_R63353) += panel-synaptics-r63353.o +obj-$(CONFIG_DRM_PANEL_SYNAPTICS_TDDI) += panel-synaptics-tddi.o obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 62435e3cd9f4..415b894890ad 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1888,6 +1888,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x203d, &delay_200_500_e50, "B140HTN02.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x205c, &delay_200_500_e50, "B116XAN02.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x208d, &delay_200_500_e50, "B140HTN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"), @@ -1909,6 +1910,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, 
&delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0xb7a9, &delay_200_500_e50, "B140HAK03.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"), @@ -1963,6 +1965,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a6a, &delay_200_500_e80, "NV140WUM-N44"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a84, &delay_200_500_e50, "NV133WUM-T01"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"), @@ -1974,6 +1977,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf2, &delay_200_500_e200, "NV156FHM-N4S"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"), @@ -2007,10 +2011,12 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x148f, &delay_200_500_e80, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1565, &delay_200_500_e80, "N156HCA-EAB"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"), @@ -2022,10 +2028,12 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x144b, &delay_200_500_e80, "MNE001BS1-4"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x1519, &delay_200_500_e80_d50, "MNF601BS1-3"), EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"), @@ -2046,6 +2054,8 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('K', 'D', 'B', 
0x1212, &delay_200_500_e50, "KD116N0930A16"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"), + EDP_PANEL_ENTRY('K', 'D', 'C', 0x0110, &delay_200_500_e50, "KD116N3730A07"), + EDP_PANEL_ENTRY('K', 'D', 'C', 0x0397, &delay_200_500_e50, "KD116N3730A12"), EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"), EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"), EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"), diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index ad4993b2f92a..947b47841b01 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -100,7 +100,7 @@ static const struct ili9881c_instr lhr050h41_init[] = { ILI9881C_COMMAND_INSTR(0x13, 0x00), ILI9881C_COMMAND_INSTR(0x14, 0x00), ILI9881C_COMMAND_INSTR(0x15, 0x00), - ILI9881C_COMMAND_INSTR(0x16, 0x0C), + ILI9881C_COMMAND_INSTR(0x16, 0x0c), ILI9881C_COMMAND_INSTR(0x17, 0x00), ILI9881C_COMMAND_INSTR(0x18, 0x00), ILI9881C_COMMAND_INSTR(0x19, 0x00), @@ -108,7 +108,7 @@ static const struct ili9881c_instr lhr050h41_init[] = { ILI9881C_COMMAND_INSTR(0x1b, 0x00), ILI9881C_COMMAND_INSTR(0x1c, 0x00), ILI9881C_COMMAND_INSTR(0x1d, 0x00), - ILI9881C_COMMAND_INSTR(0x1e, 0xC0), + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), ILI9881C_COMMAND_INSTR(0x1f, 0x80), ILI9881C_COMMAND_INSTR(0x20, 0x04), ILI9881C_COMMAND_INSTR(0x21, 0x01), @@ -134,7 +134,7 @@ static const struct ili9881c_instr lhr050h41_init[] = { ILI9881C_COMMAND_INSTR(0x35, 0x00), ILI9881C_COMMAND_INSTR(0x36, 0x00), ILI9881C_COMMAND_INSTR(0x37, 0x00), - ILI9881C_COMMAND_INSTR(0x38, 0x3C), + ILI9881C_COMMAND_INSTR(0x38, 0x3c), ILI9881C_COMMAND_INSTR(0x39, 0x00), ILI9881C_COMMAND_INSTR(0x3a, 0x00), ILI9881C_COMMAND_INSTR(0x3b, 0x00), @@ -173,11 +173,11 @@ static const struct ili9881c_instr lhr050h41_init[] = { ILI9881C_COMMAND_INSTR(0x67, 0x02), ILI9881C_COMMAND_INSTR(0x68, 0x02), ILI9881C_COMMAND_INSTR(0x69, 0x02), - ILI9881C_COMMAND_INSTR(0x6a, 0x0C), + ILI9881C_COMMAND_INSTR(0x6a, 0x0c), ILI9881C_COMMAND_INSTR(0x6b, 0x02), - ILI9881C_COMMAND_INSTR(0x6c, 0x0F), - ILI9881C_COMMAND_INSTR(0x6d, 0x0E), - ILI9881C_COMMAND_INSTR(0x6e, 0x0D), + ILI9881C_COMMAND_INSTR(0x6c, 0x0f), + ILI9881C_COMMAND_INSTR(0x6d, 0x0e), + ILI9881C_COMMAND_INSTR(0x6e, 0x0d), ILI9881C_COMMAND_INSTR(0x6f, 0x06), ILI9881C_COMMAND_INSTR(0x70, 0x07), ILI9881C_COMMAND_INSTR(0x71, 0x02), @@ -195,74 +195,74 @@ static const struct ili9881c_instr lhr050h41_init[] = { ILI9881C_COMMAND_INSTR(0x7d, 0x02), ILI9881C_COMMAND_INSTR(0x7e, 0x02), ILI9881C_COMMAND_INSTR(0x7f, 0x02), - ILI9881C_COMMAND_INSTR(0x80, 0x0C), + ILI9881C_COMMAND_INSTR(0x80, 0x0c), ILI9881C_COMMAND_INSTR(0x81, 0x02), - ILI9881C_COMMAND_INSTR(0x82, 0x0F), - ILI9881C_COMMAND_INSTR(0x83, 0x0E), - ILI9881C_COMMAND_INSTR(0x84, 0x0D), + ILI9881C_COMMAND_INSTR(0x82, 0x0f), + ILI9881C_COMMAND_INSTR(0x83, 0x0e), + ILI9881C_COMMAND_INSTR(0x84, 0x0d), ILI9881C_COMMAND_INSTR(0x85, 0x06), ILI9881C_COMMAND_INSTR(0x86, 0x07), ILI9881C_COMMAND_INSTR(0x87, 0x02), ILI9881C_COMMAND_INSTR(0x88, 0x02), ILI9881C_COMMAND_INSTR(0x89, 0x02), - ILI9881C_COMMAND_INSTR(0x8A, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), - ILI9881C_COMMAND_INSTR(0x6C, 0x15), - ILI9881C_COMMAND_INSTR(0x6E, 0x22), - ILI9881C_COMMAND_INSTR(0x6F, 0x33), - ILI9881C_COMMAND_INSTR(0x3A, 0xA4), - ILI9881C_COMMAND_INSTR(0x8D, 0x0D), - ILI9881C_COMMAND_INSTR(0x87, 0xBA), + 
ILI9881C_COMMAND_INSTR(0x6c, 0x15), + ILI9881C_COMMAND_INSTR(0x6e, 0x22), + ILI9881C_COMMAND_INSTR(0x6f, 0x33), + ILI9881C_COMMAND_INSTR(0x3a, 0xa4), + ILI9881C_COMMAND_INSTR(0x8d, 0x0d), + ILI9881C_COMMAND_INSTR(0x87, 0xba), ILI9881C_COMMAND_INSTR(0x26, 0x76), - ILI9881C_COMMAND_INSTR(0xB2, 0xD1), + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), ILI9881C_SWITCH_PAGE_INSTR(1), - ILI9881C_COMMAND_INSTR(0x22, 0x0A), - ILI9881C_COMMAND_INSTR(0x53, 0xDC), - ILI9881C_COMMAND_INSTR(0x55, 0xA7), + ILI9881C_COMMAND_INSTR(0x22, 0x0a), + ILI9881C_COMMAND_INSTR(0x53, 0xdc), + ILI9881C_COMMAND_INSTR(0x55, 0xa7), ILI9881C_COMMAND_INSTR(0x50, 0x78), ILI9881C_COMMAND_INSTR(0x51, 0x78), ILI9881C_COMMAND_INSTR(0x31, 0x02), ILI9881C_COMMAND_INSTR(0x60, 0x14), - ILI9881C_COMMAND_INSTR(0xA0, 0x2A), - ILI9881C_COMMAND_INSTR(0xA1, 0x39), - ILI9881C_COMMAND_INSTR(0xA2, 0x46), - ILI9881C_COMMAND_INSTR(0xA3, 0x0e), - ILI9881C_COMMAND_INSTR(0xA4, 0x12), - ILI9881C_COMMAND_INSTR(0xA5, 0x25), - ILI9881C_COMMAND_INSTR(0xA6, 0x19), - ILI9881C_COMMAND_INSTR(0xA7, 0x1d), - ILI9881C_COMMAND_INSTR(0xA8, 0xa6), - ILI9881C_COMMAND_INSTR(0xA9, 0x1C), - ILI9881C_COMMAND_INSTR(0xAA, 0x29), - ILI9881C_COMMAND_INSTR(0xAB, 0x85), - ILI9881C_COMMAND_INSTR(0xAC, 0x1C), - ILI9881C_COMMAND_INSTR(0xAD, 0x1B), - ILI9881C_COMMAND_INSTR(0xAE, 0x51), - ILI9881C_COMMAND_INSTR(0xAF, 0x22), - ILI9881C_COMMAND_INSTR(0xB0, 0x2d), - ILI9881C_COMMAND_INSTR(0xB1, 0x4f), - ILI9881C_COMMAND_INSTR(0xB2, 0x59), - ILI9881C_COMMAND_INSTR(0xB3, 0x3F), - ILI9881C_COMMAND_INSTR(0xC0, 0x2A), - ILI9881C_COMMAND_INSTR(0xC1, 0x3a), - ILI9881C_COMMAND_INSTR(0xC2, 0x45), - ILI9881C_COMMAND_INSTR(0xC3, 0x0e), - ILI9881C_COMMAND_INSTR(0xC4, 0x11), - ILI9881C_COMMAND_INSTR(0xC5, 0x24), - ILI9881C_COMMAND_INSTR(0xC6, 0x1a), - ILI9881C_COMMAND_INSTR(0xC7, 0x1c), - ILI9881C_COMMAND_INSTR(0xC8, 0xaa), - ILI9881C_COMMAND_INSTR(0xC9, 0x1C), - ILI9881C_COMMAND_INSTR(0xCA, 0x29), - ILI9881C_COMMAND_INSTR(0xCB, 0x96), - ILI9881C_COMMAND_INSTR(0xCC, 0x1C), - ILI9881C_COMMAND_INSTR(0xCD, 0x1B), - ILI9881C_COMMAND_INSTR(0xCE, 0x51), - ILI9881C_COMMAND_INSTR(0xCF, 0x22), - ILI9881C_COMMAND_INSTR(0xD0, 0x2b), - ILI9881C_COMMAND_INSTR(0xD1, 0x4b), - ILI9881C_COMMAND_INSTR(0xD2, 0x59), - ILI9881C_COMMAND_INSTR(0xD3, 0x3F), + ILI9881C_COMMAND_INSTR(0xa0, 0x2a), + ILI9881C_COMMAND_INSTR(0xa1, 0x39), + ILI9881C_COMMAND_INSTR(0xa2, 0x46), + ILI9881C_COMMAND_INSTR(0xa3, 0x0e), + ILI9881C_COMMAND_INSTR(0xa4, 0x12), + ILI9881C_COMMAND_INSTR(0xa5, 0x25), + ILI9881C_COMMAND_INSTR(0xa6, 0x19), + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), + ILI9881C_COMMAND_INSTR(0xa8, 0xa6), + ILI9881C_COMMAND_INSTR(0xa9, 0x1c), + ILI9881C_COMMAND_INSTR(0xaa, 0x29), + ILI9881C_COMMAND_INSTR(0xab, 0x85), + ILI9881C_COMMAND_INSTR(0xac, 0x1c), + ILI9881C_COMMAND_INSTR(0xad, 0x1b), + ILI9881C_COMMAND_INSTR(0xae, 0x51), + ILI9881C_COMMAND_INSTR(0xaf, 0x22), + ILI9881C_COMMAND_INSTR(0xb0, 0x2d), + ILI9881C_COMMAND_INSTR(0xb1, 0x4f), + ILI9881C_COMMAND_INSTR(0xb2, 0x59), + ILI9881C_COMMAND_INSTR(0xb3, 0x3f), + ILI9881C_COMMAND_INSTR(0xc0, 0x2a), + ILI9881C_COMMAND_INSTR(0xc1, 0x3a), + ILI9881C_COMMAND_INSTR(0xc2, 0x45), + ILI9881C_COMMAND_INSTR(0xc3, 0x0e), + ILI9881C_COMMAND_INSTR(0xc4, 0x11), + ILI9881C_COMMAND_INSTR(0xc5, 0x24), + ILI9881C_COMMAND_INSTR(0xc6, 0x1a), + ILI9881C_COMMAND_INSTR(0xc7, 0x1c), + ILI9881C_COMMAND_INSTR(0xc8, 0xaa), + ILI9881C_COMMAND_INSTR(0xc9, 0x1c), + ILI9881C_COMMAND_INSTR(0xca, 0x29), + ILI9881C_COMMAND_INSTR(0xcb, 0x96), + ILI9881C_COMMAND_INSTR(0xcc, 0x1c), + ILI9881C_COMMAND_INSTR(0xcd, 0x1b), + 
ILI9881C_COMMAND_INSTR(0xce, 0x51), + ILI9881C_COMMAND_INSTR(0xcf, 0x22), + ILI9881C_COMMAND_INSTR(0xd0, 0x2b), + ILI9881C_COMMAND_INSTR(0xd1, 0x4b), + ILI9881C_COMMAND_INSTR(0xd2, 0x59), + ILI9881C_COMMAND_INSTR(0xd3, 0x3f), }; static const struct ili9881c_instr k101_im2byl02_init[] = { @@ -276,12 +276,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0x07, 0x00), ILI9881C_COMMAND_INSTR(0x08, 0x00), ILI9881C_COMMAND_INSTR(0x09, 0x00), - ILI9881C_COMMAND_INSTR(0x0A, 0x01), - ILI9881C_COMMAND_INSTR(0x0B, 0x01), - ILI9881C_COMMAND_INSTR(0x0C, 0x00), - ILI9881C_COMMAND_INSTR(0x0D, 0x01), - ILI9881C_COMMAND_INSTR(0x0E, 0x01), - ILI9881C_COMMAND_INSTR(0x0F, 0x00), + ILI9881C_COMMAND_INSTR(0x0a, 0x01), + ILI9881C_COMMAND_INSTR(0x0b, 0x01), + ILI9881C_COMMAND_INSTR(0x0c, 0x00), + ILI9881C_COMMAND_INSTR(0x0d, 0x01), + ILI9881C_COMMAND_INSTR(0x0e, 0x01), + ILI9881C_COMMAND_INSTR(0x0f, 0x00), ILI9881C_COMMAND_INSTR(0x10, 0x00), ILI9881C_COMMAND_INSTR(0x11, 0x00), ILI9881C_COMMAND_INSTR(0x12, 0x00), @@ -292,12 +292,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0x17, 0x00), ILI9881C_COMMAND_INSTR(0x18, 0x00), ILI9881C_COMMAND_INSTR(0x19, 0x00), - ILI9881C_COMMAND_INSTR(0x1A, 0x00), - ILI9881C_COMMAND_INSTR(0x1B, 0x00), - ILI9881C_COMMAND_INSTR(0x1C, 0x00), - ILI9881C_COMMAND_INSTR(0x1D, 0x00), - ILI9881C_COMMAND_INSTR(0x1E, 0x40), - ILI9881C_COMMAND_INSTR(0x1F, 0xC0), + ILI9881C_COMMAND_INSTR(0x1a, 0x00), + ILI9881C_COMMAND_INSTR(0x1b, 0x00), + ILI9881C_COMMAND_INSTR(0x1c, 0x00), + ILI9881C_COMMAND_INSTR(0x1d, 0x00), + ILI9881C_COMMAND_INSTR(0x1e, 0x40), + ILI9881C_COMMAND_INSTR(0x1f, 0xc0), ILI9881C_COMMAND_INSTR(0x20, 0x06), ILI9881C_COMMAND_INSTR(0x21, 0x01), ILI9881C_COMMAND_INSTR(0x22, 0x06), @@ -306,14 +306,14 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0x25, 0x88), ILI9881C_COMMAND_INSTR(0x26, 0x00), ILI9881C_COMMAND_INSTR(0x27, 0x00), - ILI9881C_COMMAND_INSTR(0x28, 0x3B), + ILI9881C_COMMAND_INSTR(0x28, 0x3b), ILI9881C_COMMAND_INSTR(0x29, 0x03), - ILI9881C_COMMAND_INSTR(0x2A, 0x00), - ILI9881C_COMMAND_INSTR(0x2B, 0x00), - ILI9881C_COMMAND_INSTR(0x2C, 0x00), - ILI9881C_COMMAND_INSTR(0x2D, 0x00), - ILI9881C_COMMAND_INSTR(0x2E, 0x00), - ILI9881C_COMMAND_INSTR(0x2F, 0x00), + ILI9881C_COMMAND_INSTR(0x2a, 0x00), + ILI9881C_COMMAND_INSTR(0x2b, 0x00), + ILI9881C_COMMAND_INSTR(0x2c, 0x00), + ILI9881C_COMMAND_INSTR(0x2d, 0x00), + ILI9881C_COMMAND_INSTR(0x2e, 0x00), + ILI9881C_COMMAND_INSTR(0x2f, 0x00), ILI9881C_COMMAND_INSTR(0x30, 0x00), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x32, 0x00), @@ -324,12 +324,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0x37, 0x00), ILI9881C_COMMAND_INSTR(0x38, 0x00), ILI9881C_COMMAND_INSTR(0x39, 0x00), - ILI9881C_COMMAND_INSTR(0x3A, 0x00), - ILI9881C_COMMAND_INSTR(0x3B, 0x00), - ILI9881C_COMMAND_INSTR(0x3C, 0x00), - ILI9881C_COMMAND_INSTR(0x3D, 0x00), - ILI9881C_COMMAND_INSTR(0x3E, 0x00), - ILI9881C_COMMAND_INSTR(0x3F, 0x00), + ILI9881C_COMMAND_INSTR(0x3a, 0x00), + ILI9881C_COMMAND_INSTR(0x3b, 0x00), + ILI9881C_COMMAND_INSTR(0x3c, 0x00), + ILI9881C_COMMAND_INSTR(0x3d, 0x00), + ILI9881C_COMMAND_INSTR(0x3e, 0x00), + ILI9881C_COMMAND_INSTR(0x3f, 0x00), ILI9881C_COMMAND_INSTR(0x40, 0x00), ILI9881C_COMMAND_INSTR(0x41, 0x00), ILI9881C_COMMAND_INSTR(0x42, 0x00), @@ -340,17 +340,17 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0x52, 0x45), 
ILI9881C_COMMAND_INSTR(0x53, 0x67), ILI9881C_COMMAND_INSTR(0x54, 0x89), - ILI9881C_COMMAND_INSTR(0x55, 0xAB), + ILI9881C_COMMAND_INSTR(0x55, 0xab), ILI9881C_COMMAND_INSTR(0x56, 0x01), ILI9881C_COMMAND_INSTR(0x57, 0x23), ILI9881C_COMMAND_INSTR(0x58, 0x45), ILI9881C_COMMAND_INSTR(0x59, 0x67), - ILI9881C_COMMAND_INSTR(0x5A, 0x89), - ILI9881C_COMMAND_INSTR(0x5B, 0xAB), - ILI9881C_COMMAND_INSTR(0x5C, 0xCD), - ILI9881C_COMMAND_INSTR(0x5D, 0xEF), - ILI9881C_COMMAND_INSTR(0x5E, 0x00), - ILI9881C_COMMAND_INSTR(0x5F, 0x01), + ILI9881C_COMMAND_INSTR(0x5a, 0x89), + ILI9881C_COMMAND_INSTR(0x5b, 0xab), + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), + ILI9881C_COMMAND_INSTR(0x5d, 0xef), + ILI9881C_COMMAND_INSTR(0x5e, 0x00), + ILI9881C_COMMAND_INSTR(0x5f, 0x01), ILI9881C_COMMAND_INSTR(0x60, 0x01), ILI9881C_COMMAND_INSTR(0x61, 0x06), ILI9881C_COMMAND_INSTR(0x62, 0x06), @@ -361,101 +361,101 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0x67, 0x02), ILI9881C_COMMAND_INSTR(0x68, 0x02), ILI9881C_COMMAND_INSTR(0x69, 0x05), - ILI9881C_COMMAND_INSTR(0x6A, 0x05), - ILI9881C_COMMAND_INSTR(0x6B, 0x02), - ILI9881C_COMMAND_INSTR(0x6C, 0x0D), - ILI9881C_COMMAND_INSTR(0x6D, 0x0D), - ILI9881C_COMMAND_INSTR(0x6E, 0x0C), - ILI9881C_COMMAND_INSTR(0x6F, 0x0C), - ILI9881C_COMMAND_INSTR(0x70, 0x0F), - ILI9881C_COMMAND_INSTR(0x71, 0x0F), - ILI9881C_COMMAND_INSTR(0x72, 0x0E), - ILI9881C_COMMAND_INSTR(0x73, 0x0E), + ILI9881C_COMMAND_INSTR(0x6a, 0x05), + ILI9881C_COMMAND_INSTR(0x6b, 0x02), + ILI9881C_COMMAND_INSTR(0x6c, 0x0d), + ILI9881C_COMMAND_INSTR(0x6d, 0x0d), + ILI9881C_COMMAND_INSTR(0x6e, 0x0c), + ILI9881C_COMMAND_INSTR(0x6f, 0x0c), + ILI9881C_COMMAND_INSTR(0x70, 0x0f), + ILI9881C_COMMAND_INSTR(0x71, 0x0f), + ILI9881C_COMMAND_INSTR(0x72, 0x0e), + ILI9881C_COMMAND_INSTR(0x73, 0x0e), ILI9881C_COMMAND_INSTR(0x74, 0x02), ILI9881C_COMMAND_INSTR(0x75, 0x01), ILI9881C_COMMAND_INSTR(0x76, 0x01), ILI9881C_COMMAND_INSTR(0x77, 0x06), ILI9881C_COMMAND_INSTR(0x78, 0x06), ILI9881C_COMMAND_INSTR(0x79, 0x07), - ILI9881C_COMMAND_INSTR(0x7A, 0x07), - ILI9881C_COMMAND_INSTR(0x7B, 0x00), - ILI9881C_COMMAND_INSTR(0x7C, 0x00), - ILI9881C_COMMAND_INSTR(0x7D, 0x02), - ILI9881C_COMMAND_INSTR(0x7E, 0x02), - ILI9881C_COMMAND_INSTR(0x7F, 0x05), + ILI9881C_COMMAND_INSTR(0x7a, 0x07), + ILI9881C_COMMAND_INSTR(0x7b, 0x00), + ILI9881C_COMMAND_INSTR(0x7c, 0x00), + ILI9881C_COMMAND_INSTR(0x7d, 0x02), + ILI9881C_COMMAND_INSTR(0x7e, 0x02), + ILI9881C_COMMAND_INSTR(0x7f, 0x05), ILI9881C_COMMAND_INSTR(0x80, 0x05), ILI9881C_COMMAND_INSTR(0x81, 0x02), - ILI9881C_COMMAND_INSTR(0x82, 0x0D), - ILI9881C_COMMAND_INSTR(0x83, 0x0D), - ILI9881C_COMMAND_INSTR(0x84, 0x0C), - ILI9881C_COMMAND_INSTR(0x85, 0x0C), - ILI9881C_COMMAND_INSTR(0x86, 0x0F), - ILI9881C_COMMAND_INSTR(0x87, 0x0F), - ILI9881C_COMMAND_INSTR(0x88, 0x0E), - ILI9881C_COMMAND_INSTR(0x89, 0x0E), - ILI9881C_COMMAND_INSTR(0x8A, 0x02), + ILI9881C_COMMAND_INSTR(0x82, 0x0d), + ILI9881C_COMMAND_INSTR(0x83, 0x0d), + ILI9881C_COMMAND_INSTR(0x84, 0x0c), + ILI9881C_COMMAND_INSTR(0x85, 0x0c), + ILI9881C_COMMAND_INSTR(0x86, 0x0f), + ILI9881C_COMMAND_INSTR(0x87, 0x0f), + ILI9881C_COMMAND_INSTR(0x88, 0x0e), + ILI9881C_COMMAND_INSTR(0x89, 0x0e), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), - ILI9881C_COMMAND_INSTR(0x3B, 0xC0), /* ILI4003D sel */ - ILI9881C_COMMAND_INSTR(0x6C, 0x15), /* Set VCORE voltage = 1.5V */ - ILI9881C_COMMAND_INSTR(0x6E, 0x2A), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */ - ILI9881C_COMMAND_INSTR(0x6F, 0x33), /* pumping ratio VGH=5x VGL=-3x 
*/ - ILI9881C_COMMAND_INSTR(0x8D, 0x1B), /* VGL clamp -10V */ - ILI9881C_COMMAND_INSTR(0x87, 0xBA), /* ESD */ - ILI9881C_COMMAND_INSTR(0x3A, 0x24), /* POWER SAVING */ + ILI9881C_COMMAND_INSTR(0x3b, 0xc0), /* ILI4003D sel */ + ILI9881C_COMMAND_INSTR(0x6c, 0x15), /* Set VCORE voltage = 1.5V */ + ILI9881C_COMMAND_INSTR(0x6e, 0x2a), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */ + ILI9881C_COMMAND_INSTR(0x6f, 0x33), /* pumping ratio VGH=5x VGL=-3x */ + ILI9881C_COMMAND_INSTR(0x8d, 0x1b), /* VGL clamp -10V */ + ILI9881C_COMMAND_INSTR(0x87, 0xba), /* ESD */ + ILI9881C_COMMAND_INSTR(0x3a, 0x24), /* POWER SAVING */ ILI9881C_COMMAND_INSTR(0x26, 0x76), - ILI9881C_COMMAND_INSTR(0xB2, 0xD1), + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), ILI9881C_SWITCH_PAGE_INSTR(1), - ILI9881C_COMMAND_INSTR(0x22, 0x0A), /* BGR, SS */ + ILI9881C_COMMAND_INSTR(0x22, 0x0a), /* BGR, SS */ ILI9881C_COMMAND_INSTR(0x31, 0x00), /* Zigzag type3 inversion */ ILI9881C_COMMAND_INSTR(0x40, 0x53), /* ILI4003D sel */ ILI9881C_COMMAND_INSTR(0x43, 0x66), - ILI9881C_COMMAND_INSTR(0x53, 0x4C), + ILI9881C_COMMAND_INSTR(0x53, 0x4c), ILI9881C_COMMAND_INSTR(0x50, 0x87), ILI9881C_COMMAND_INSTR(0x51, 0x82), ILI9881C_COMMAND_INSTR(0x60, 0x15), ILI9881C_COMMAND_INSTR(0x61, 0x01), - ILI9881C_COMMAND_INSTR(0x62, 0x0C), + ILI9881C_COMMAND_INSTR(0x62, 0x0c), ILI9881C_COMMAND_INSTR(0x63, 0x00), - ILI9881C_COMMAND_INSTR(0xA0, 0x00), - ILI9881C_COMMAND_INSTR(0xA1, 0x13), /* VP251 */ - ILI9881C_COMMAND_INSTR(0xA2, 0x23), /* VP247 */ - ILI9881C_COMMAND_INSTR(0xA3, 0x14), /* VP243 */ - ILI9881C_COMMAND_INSTR(0xA4, 0x16), /* VP239 */ - ILI9881C_COMMAND_INSTR(0xA5, 0x29), /* VP231 */ - ILI9881C_COMMAND_INSTR(0xA6, 0x1E), /* VP219 */ - ILI9881C_COMMAND_INSTR(0xA7, 0x1D), /* VP203 */ - ILI9881C_COMMAND_INSTR(0xA8, 0x86), /* VP175 */ - ILI9881C_COMMAND_INSTR(0xA9, 0x1E), /* VP144 */ - ILI9881C_COMMAND_INSTR(0xAA, 0x29), /* VP111 */ - ILI9881C_COMMAND_INSTR(0xAB, 0x74), /* VP80 */ - ILI9881C_COMMAND_INSTR(0xAC, 0x19), /* VP52 */ - ILI9881C_COMMAND_INSTR(0xAD, 0x17), /* VP36 */ - ILI9881C_COMMAND_INSTR(0xAE, 0x4B), /* VP24 */ - ILI9881C_COMMAND_INSTR(0xAF, 0x20), /* VP16 */ - ILI9881C_COMMAND_INSTR(0xB0, 0x26), /* VP12 */ - ILI9881C_COMMAND_INSTR(0xB1, 0x4C), /* VP8 */ - ILI9881C_COMMAND_INSTR(0xB2, 0x5D), /* VP4 */ - ILI9881C_COMMAND_INSTR(0xB3, 0x3F), /* VP0 */ - ILI9881C_COMMAND_INSTR(0xC0, 0x00), /* VN255 GAMMA N */ - ILI9881C_COMMAND_INSTR(0xC1, 0x13), /* VN251 */ - ILI9881C_COMMAND_INSTR(0xC2, 0x23), /* VN247 */ - ILI9881C_COMMAND_INSTR(0xC3, 0x14), /* VN243 */ - ILI9881C_COMMAND_INSTR(0xC4, 0x16), /* VN239 */ - ILI9881C_COMMAND_INSTR(0xC5, 0x29), /* VN231 */ - ILI9881C_COMMAND_INSTR(0xC6, 0x1E), /* VN219 */ - ILI9881C_COMMAND_INSTR(0xC7, 0x1D), /* VN203 */ - ILI9881C_COMMAND_INSTR(0xC8, 0x86), /* VN175 */ - ILI9881C_COMMAND_INSTR(0xC9, 0x1E), /* VN144 */ - ILI9881C_COMMAND_INSTR(0xCA, 0x29), /* VN111 */ - ILI9881C_COMMAND_INSTR(0xCB, 0x74), /* VN80 */ - ILI9881C_COMMAND_INSTR(0xCC, 0x19), /* VN52 */ - ILI9881C_COMMAND_INSTR(0xCD, 0x17), /* VN36 */ - ILI9881C_COMMAND_INSTR(0xCE, 0x4B), /* VN24 */ - ILI9881C_COMMAND_INSTR(0xCF, 0x20), /* VN16 */ - ILI9881C_COMMAND_INSTR(0xD0, 0x26), /* VN12 */ - ILI9881C_COMMAND_INSTR(0xD1, 0x4C), /* VN8 */ - ILI9881C_COMMAND_INSTR(0xD2, 0x5D), /* VN4 */ - ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */ + ILI9881C_COMMAND_INSTR(0xa0, 0x00), + ILI9881C_COMMAND_INSTR(0xa1, 0x13), /* VP251 */ + ILI9881C_COMMAND_INSTR(0xa2, 0x23), /* VP247 */ + ILI9881C_COMMAND_INSTR(0xa3, 0x14), /* VP243 */ + ILI9881C_COMMAND_INSTR(0xa4, 0x16), 
/* VP239 */ + ILI9881C_COMMAND_INSTR(0xa5, 0x29), /* VP231 */ + ILI9881C_COMMAND_INSTR(0xa6, 0x1e), /* VP219 */ + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), /* VP203 */ + ILI9881C_COMMAND_INSTR(0xa8, 0x86), /* VP175 */ + ILI9881C_COMMAND_INSTR(0xa9, 0x1e), /* VP144 */ + ILI9881C_COMMAND_INSTR(0xaa, 0x29), /* VP111 */ + ILI9881C_COMMAND_INSTR(0xab, 0x74), /* VP80 */ + ILI9881C_COMMAND_INSTR(0xac, 0x19), /* VP52 */ + ILI9881C_COMMAND_INSTR(0xad, 0x17), /* VP36 */ + ILI9881C_COMMAND_INSTR(0xae, 0x4b), /* VP24 */ + ILI9881C_COMMAND_INSTR(0xaf, 0x20), /* VP16 */ + ILI9881C_COMMAND_INSTR(0xb0, 0x26), /* VP12 */ + ILI9881C_COMMAND_INSTR(0xb1, 0x4c), /* VP8 */ + ILI9881C_COMMAND_INSTR(0xb2, 0x5d), /* VP4 */ + ILI9881C_COMMAND_INSTR(0xb3, 0x3f), /* VP0 */ + ILI9881C_COMMAND_INSTR(0xc0, 0x00), /* VN255 GAMMA N */ + ILI9881C_COMMAND_INSTR(0xc1, 0x13), /* VN251 */ + ILI9881C_COMMAND_INSTR(0xc2, 0x23), /* VN247 */ + ILI9881C_COMMAND_INSTR(0xc3, 0x14), /* VN243 */ + ILI9881C_COMMAND_INSTR(0xc4, 0x16), /* VN239 */ + ILI9881C_COMMAND_INSTR(0xc5, 0x29), /* VN231 */ + ILI9881C_COMMAND_INSTR(0xc6, 0x1e), /* VN219 */ + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), /* VN203 */ + ILI9881C_COMMAND_INSTR(0xc8, 0x86), /* VN175 */ + ILI9881C_COMMAND_INSTR(0xc9, 0x1e), /* VN144 */ + ILI9881C_COMMAND_INSTR(0xca, 0x29), /* VN111 */ + ILI9881C_COMMAND_INSTR(0xcb, 0x74), /* VN80 */ + ILI9881C_COMMAND_INSTR(0xcc, 0x19), /* VN52 */ + ILI9881C_COMMAND_INSTR(0xcd, 0x17), /* VN36 */ + ILI9881C_COMMAND_INSTR(0xce, 0x4b), /* VN24 */ + ILI9881C_COMMAND_INSTR(0xcf, 0x20), /* VN16 */ + ILI9881C_COMMAND_INSTR(0xd0, 0x26), /* VN12 */ + ILI9881C_COMMAND_INSTR(0xd1, 0x4c), /* VN8 */ + ILI9881C_COMMAND_INSTR(0xd2, 0x5d), /* VN4 */ + ILI9881C_COMMAND_INSTR(0xd3, 0x3f), /* VN0 */ }; static const struct ili9881c_instr kd050hdfia020_init[] = { @@ -517,7 +517,7 @@ static const struct ili9881c_instr kd050hdfia020_init[] = { ILI9881C_COMMAND_INSTR(0x35, 0x00), ILI9881C_COMMAND_INSTR(0x36, 0x00), ILI9881C_COMMAND_INSTR(0x37, 0x00), - ILI9881C_COMMAND_INSTR(0x38, 0x3C), + ILI9881C_COMMAND_INSTR(0x38, 0x3c), ILI9881C_COMMAND_INSTR(0x39, 0x00), ILI9881C_COMMAND_INSTR(0x3a, 0x40), ILI9881C_COMMAND_INSTR(0x3b, 0x40), @@ -549,10 +549,10 @@ static const struct ili9881c_instr kd050hdfia020_init[] = { ILI9881C_COMMAND_INSTR(0x60, 0x00), ILI9881C_COMMAND_INSTR(0x61, 0x15), ILI9881C_COMMAND_INSTR(0x62, 0x14), - ILI9881C_COMMAND_INSTR(0x63, 0x0E), - ILI9881C_COMMAND_INSTR(0x64, 0x0F), - ILI9881C_COMMAND_INSTR(0x65, 0x0C), - ILI9881C_COMMAND_INSTR(0x66, 0x0D), + ILI9881C_COMMAND_INSTR(0x63, 0x0e), + ILI9881C_COMMAND_INSTR(0x64, 0x0f), + ILI9881C_COMMAND_INSTR(0x65, 0x0c), + ILI9881C_COMMAND_INSTR(0x66, 0x0d), ILI9881C_COMMAND_INSTR(0x67, 0x06), ILI9881C_COMMAND_INSTR(0x68, 0x02), ILI9881C_COMMAND_INSTR(0x69, 0x07), @@ -571,10 +571,10 @@ static const struct ili9881c_instr kd050hdfia020_init[] = { ILI9881C_COMMAND_INSTR(0x76, 0x00), ILI9881C_COMMAND_INSTR(0x77, 0x14), ILI9881C_COMMAND_INSTR(0x78, 0x15), - ILI9881C_COMMAND_INSTR(0x79, 0x0E), - ILI9881C_COMMAND_INSTR(0x7a, 0x0F), - ILI9881C_COMMAND_INSTR(0x7b, 0x0C), - ILI9881C_COMMAND_INSTR(0x7c, 0x0D), + ILI9881C_COMMAND_INSTR(0x79, 0x0e), + ILI9881C_COMMAND_INSTR(0x7a, 0x0f), + ILI9881C_COMMAND_INSTR(0x7b, 0x0c), + ILI9881C_COMMAND_INSTR(0x7c, 0x0d), ILI9881C_COMMAND_INSTR(0x7d, 0x06), ILI9881C_COMMAND_INSTR(0x7e, 0x02), ILI9881C_COMMAND_INSTR(0x7f, 0x07), @@ -587,71 +587,71 @@ static const struct ili9881c_instr kd050hdfia020_init[] = { ILI9881C_COMMAND_INSTR(0x87, 0x02), ILI9881C_COMMAND_INSTR(0x88, 0x02), 
ILI9881C_COMMAND_INSTR(0x89, 0x02), - ILI9881C_COMMAND_INSTR(0x8A, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), ILI9881C_SWITCH_PAGE_INSTR(0x4), - ILI9881C_COMMAND_INSTR(0x6C, 0x15), - ILI9881C_COMMAND_INSTR(0x6E, 0x2A), - ILI9881C_COMMAND_INSTR(0x6F, 0x33), - ILI9881C_COMMAND_INSTR(0x3A, 0x94), - ILI9881C_COMMAND_INSTR(0x8D, 0x15), - ILI9881C_COMMAND_INSTR(0x87, 0xBA), + ILI9881C_COMMAND_INSTR(0x6c, 0x15), + ILI9881C_COMMAND_INSTR(0x6e, 0x2a), + ILI9881C_COMMAND_INSTR(0x6f, 0x33), + ILI9881C_COMMAND_INSTR(0x3a, 0x94), + ILI9881C_COMMAND_INSTR(0x8d, 0x15), + ILI9881C_COMMAND_INSTR(0x87, 0xba), ILI9881C_COMMAND_INSTR(0x26, 0x76), - ILI9881C_COMMAND_INSTR(0xB2, 0xD1), - ILI9881C_COMMAND_INSTR(0xB5, 0x06), + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), + ILI9881C_COMMAND_INSTR(0xb5, 0x06), ILI9881C_SWITCH_PAGE_INSTR(0x1), - ILI9881C_COMMAND_INSTR(0x22, 0x0A), + ILI9881C_COMMAND_INSTR(0x22, 0x0a), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x53, 0x90), - ILI9881C_COMMAND_INSTR(0x55, 0xA2), - ILI9881C_COMMAND_INSTR(0x50, 0xB7), - ILI9881C_COMMAND_INSTR(0x51, 0xB7), + ILI9881C_COMMAND_INSTR(0x55, 0xa2), + ILI9881C_COMMAND_INSTR(0x50, 0xb7), + ILI9881C_COMMAND_INSTR(0x51, 0xb7), ILI9881C_COMMAND_INSTR(0x60, 0x22), ILI9881C_COMMAND_INSTR(0x61, 0x00), ILI9881C_COMMAND_INSTR(0x62, 0x19), ILI9881C_COMMAND_INSTR(0x63, 0x10), - ILI9881C_COMMAND_INSTR(0xA0, 0x08), - ILI9881C_COMMAND_INSTR(0xA1, 0x1A), - ILI9881C_COMMAND_INSTR(0xA2, 0x27), - ILI9881C_COMMAND_INSTR(0xA3, 0x15), - ILI9881C_COMMAND_INSTR(0xA4, 0x17), - ILI9881C_COMMAND_INSTR(0xA5, 0x2A), - ILI9881C_COMMAND_INSTR(0xA6, 0x1E), - ILI9881C_COMMAND_INSTR(0xA7, 0x1F), - ILI9881C_COMMAND_INSTR(0xA8, 0x8B), - ILI9881C_COMMAND_INSTR(0xA9, 0x1B), - ILI9881C_COMMAND_INSTR(0xAA, 0x27), - ILI9881C_COMMAND_INSTR(0xAB, 0x78), - ILI9881C_COMMAND_INSTR(0xAC, 0x18), - ILI9881C_COMMAND_INSTR(0xAD, 0x18), - ILI9881C_COMMAND_INSTR(0xAE, 0x4C), - ILI9881C_COMMAND_INSTR(0xAF, 0x21), - ILI9881C_COMMAND_INSTR(0xB0, 0x27), - ILI9881C_COMMAND_INSTR(0xB1, 0x54), - ILI9881C_COMMAND_INSTR(0xB2, 0x67), - ILI9881C_COMMAND_INSTR(0xB3, 0x39), - ILI9881C_COMMAND_INSTR(0xC0, 0x08), - ILI9881C_COMMAND_INSTR(0xC1, 0x1A), - ILI9881C_COMMAND_INSTR(0xC2, 0x27), - ILI9881C_COMMAND_INSTR(0xC3, 0x15), - ILI9881C_COMMAND_INSTR(0xC4, 0x17), - ILI9881C_COMMAND_INSTR(0xC5, 0x2A), - ILI9881C_COMMAND_INSTR(0xC6, 0x1E), - ILI9881C_COMMAND_INSTR(0xC7, 0x1F), - ILI9881C_COMMAND_INSTR(0xC8, 0x8B), - ILI9881C_COMMAND_INSTR(0xC9, 0x1B), - ILI9881C_COMMAND_INSTR(0xCA, 0x27), - ILI9881C_COMMAND_INSTR(0xCB, 0x78), - ILI9881C_COMMAND_INSTR(0xCC, 0x18), - ILI9881C_COMMAND_INSTR(0xCD, 0x18), - ILI9881C_COMMAND_INSTR(0xCE, 0x4C), - ILI9881C_COMMAND_INSTR(0xCF, 0x21), - ILI9881C_COMMAND_INSTR(0xD0, 0x27), - ILI9881C_COMMAND_INSTR(0xD1, 0x54), - ILI9881C_COMMAND_INSTR(0xD2, 0x67), - ILI9881C_COMMAND_INSTR(0xD3, 0x39), + ILI9881C_COMMAND_INSTR(0xa0, 0x08), + ILI9881C_COMMAND_INSTR(0xa1, 0x1a), + ILI9881C_COMMAND_INSTR(0xa2, 0x27), + ILI9881C_COMMAND_INSTR(0xa3, 0x15), + ILI9881C_COMMAND_INSTR(0xa4, 0x17), + ILI9881C_COMMAND_INSTR(0xa5, 0x2a), + ILI9881C_COMMAND_INSTR(0xa6, 0x1e), + ILI9881C_COMMAND_INSTR(0xa7, 0x1f), + ILI9881C_COMMAND_INSTR(0xa8, 0x8b), + ILI9881C_COMMAND_INSTR(0xa9, 0x1b), + ILI9881C_COMMAND_INSTR(0xaa, 0x27), + ILI9881C_COMMAND_INSTR(0xab, 0x78), + ILI9881C_COMMAND_INSTR(0xac, 0x18), + ILI9881C_COMMAND_INSTR(0xad, 0x18), + ILI9881C_COMMAND_INSTR(0xae, 0x4c), + ILI9881C_COMMAND_INSTR(0xaf, 0x21), + ILI9881C_COMMAND_INSTR(0xb0, 0x27), + ILI9881C_COMMAND_INSTR(0xb1, 0x54), + 
ILI9881C_COMMAND_INSTR(0xb2, 0x67), + ILI9881C_COMMAND_INSTR(0xb3, 0x39), + ILI9881C_COMMAND_INSTR(0xc0, 0x08), + ILI9881C_COMMAND_INSTR(0xc1, 0x1a), + ILI9881C_COMMAND_INSTR(0xc2, 0x27), + ILI9881C_COMMAND_INSTR(0xc3, 0x15), + ILI9881C_COMMAND_INSTR(0xc4, 0x17), + ILI9881C_COMMAND_INSTR(0xc5, 0x2a), + ILI9881C_COMMAND_INSTR(0xc6, 0x1e), + ILI9881C_COMMAND_INSTR(0xc7, 0x1f), + ILI9881C_COMMAND_INSTR(0xc8, 0x8b), + ILI9881C_COMMAND_INSTR(0xc9, 0x1b), + ILI9881C_COMMAND_INSTR(0xca, 0x27), + ILI9881C_COMMAND_INSTR(0xcb, 0x78), + ILI9881C_COMMAND_INSTR(0xcc, 0x18), + ILI9881C_COMMAND_INSTR(0xcd, 0x18), + ILI9881C_COMMAND_INSTR(0xce, 0x4c), + ILI9881C_COMMAND_INSTR(0xcf, 0x21), + ILI9881C_COMMAND_INSTR(0xd0, 0x27), + ILI9881C_COMMAND_INSTR(0xd1, 0x54), + ILI9881C_COMMAND_INSTR(0xd2, 0x67), + ILI9881C_COMMAND_INSTR(0xd3, 0x39), ILI9881C_SWITCH_PAGE_INSTR(0), ILI9881C_COMMAND_INSTR(0x35, 0x00), - ILI9881C_COMMAND_INSTR(0x3A, 0x7), + ILI9881C_COMMAND_INSTR(0x3a, 0x7), }; static const struct ili9881c_instr tl050hdv35_init[] = { @@ -696,7 +696,7 @@ static const struct ili9881c_instr tl050hdv35_init[] = { ILI9881C_COMMAND_INSTR(0x35, 0x00), ILI9881C_COMMAND_INSTR(0x36, 0x00), ILI9881C_COMMAND_INSTR(0x37, 0x00), - ILI9881C_COMMAND_INSTR(0x38, 0x3C), + ILI9881C_COMMAND_INSTR(0x38, 0x3c), ILI9881C_COMMAND_INSTR(0x39, 0x00), ILI9881C_COMMAND_INSTR(0x3a, 0x40), ILI9881C_COMMAND_INSTR(0x3b, 0x40), @@ -750,7 +750,7 @@ static const struct ili9881c_instr tl050hdv35_init[] = { ILI9881C_COMMAND_INSTR(0x7f, 0x07), ILI9881C_COMMAND_INSTR(0x88, 0x02), ILI9881C_COMMAND_INSTR(0x89, 0x02), - ILI9881C_COMMAND_INSTR(0x8A, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), ILI9881C_COMMAND_INSTR(0x38, 0x01), ILI9881C_COMMAND_INSTR(0x39, 0x00), @@ -820,6 +820,204 @@ static const struct ili9881c_instr tl050hdv35_init[] = { ILI9881C_COMMAND_INSTR(0xd3, 0x39), }; +static const struct ili9881c_instr w552946aaa_init[] = { + ILI9881C_SWITCH_PAGE_INSTR(3), + ILI9881C_COMMAND_INSTR(0x01, 0x00), + ILI9881C_COMMAND_INSTR(0x02, 0x00), + ILI9881C_COMMAND_INSTR(0x03, 0x53), + ILI9881C_COMMAND_INSTR(0x04, 0x53), + ILI9881C_COMMAND_INSTR(0x05, 0x13), + ILI9881C_COMMAND_INSTR(0x06, 0x04), + ILI9881C_COMMAND_INSTR(0x07, 0x02), + ILI9881C_COMMAND_INSTR(0x08, 0x02), + ILI9881C_COMMAND_INSTR(0x09, 0x00), + ILI9881C_COMMAND_INSTR(0x0a, 0x00), + ILI9881C_COMMAND_INSTR(0x0b, 0x00), + ILI9881C_COMMAND_INSTR(0x0c, 0x00), + ILI9881C_COMMAND_INSTR(0x0d, 0x00), + ILI9881C_COMMAND_INSTR(0x0e, 0x00), + ILI9881C_COMMAND_INSTR(0x0f, 0x00), + ILI9881C_COMMAND_INSTR(0x10, 0x00), + ILI9881C_COMMAND_INSTR(0x11, 0x00), + ILI9881C_COMMAND_INSTR(0x12, 0x00), + ILI9881C_COMMAND_INSTR(0x13, 0x00), + ILI9881C_COMMAND_INSTR(0x14, 0x00), + ILI9881C_COMMAND_INSTR(0x15, 0x08), + ILI9881C_COMMAND_INSTR(0x16, 0x10), + ILI9881C_COMMAND_INSTR(0x17, 0x00), + ILI9881C_COMMAND_INSTR(0x18, 0x08), + ILI9881C_COMMAND_INSTR(0x19, 0x00), + ILI9881C_COMMAND_INSTR(0x1a, 0x00), + ILI9881C_COMMAND_INSTR(0x1b, 0x00), + ILI9881C_COMMAND_INSTR(0x1c, 0x00), + ILI9881C_COMMAND_INSTR(0x1d, 0x00), + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), + ILI9881C_COMMAND_INSTR(0x1f, 0x80), + ILI9881C_COMMAND_INSTR(0x20, 0x02), + ILI9881C_COMMAND_INSTR(0x21, 0x09), + ILI9881C_COMMAND_INSTR(0x22, 0x00), + ILI9881C_COMMAND_INSTR(0x23, 0x00), + ILI9881C_COMMAND_INSTR(0x24, 0x00), + ILI9881C_COMMAND_INSTR(0x25, 0x00), + ILI9881C_COMMAND_INSTR(0x26, 0x00), + ILI9881C_COMMAND_INSTR(0x27, 0x00), + ILI9881C_COMMAND_INSTR(0x28, 0x55), + ILI9881C_COMMAND_INSTR(0x29, 0x03), + 
ILI9881C_COMMAND_INSTR(0x2a, 0x00), + ILI9881C_COMMAND_INSTR(0x2b, 0x00), + ILI9881C_COMMAND_INSTR(0x2c, 0x00), + ILI9881C_COMMAND_INSTR(0x2d, 0x00), + ILI9881C_COMMAND_INSTR(0x2e, 0x00), + ILI9881C_COMMAND_INSTR(0x2f, 0x00), + ILI9881C_COMMAND_INSTR(0x30, 0x00), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x33, 0x00), + ILI9881C_COMMAND_INSTR(0x34, 0x04), + ILI9881C_COMMAND_INSTR(0x35, 0x05), + ILI9881C_COMMAND_INSTR(0x36, 0x05), + ILI9881C_COMMAND_INSTR(0x37, 0x00), + ILI9881C_COMMAND_INSTR(0x38, 0x3c), + ILI9881C_COMMAND_INSTR(0x39, 0x35), + ILI9881C_COMMAND_INSTR(0x3a, 0x00), + ILI9881C_COMMAND_INSTR(0x3b, 0x40), + ILI9881C_COMMAND_INSTR(0x3c, 0x00), + ILI9881C_COMMAND_INSTR(0x3d, 0x00), + ILI9881C_COMMAND_INSTR(0x3e, 0x00), + ILI9881C_COMMAND_INSTR(0x3f, 0x00), + ILI9881C_COMMAND_INSTR(0x40, 0x00), + ILI9881C_COMMAND_INSTR(0x41, 0x88), + ILI9881C_COMMAND_INSTR(0x42, 0x00), + ILI9881C_COMMAND_INSTR(0x43, 0x00), + ILI9881C_COMMAND_INSTR(0x44, 0x1f), + ILI9881C_COMMAND_INSTR(0x50, 0x01), + ILI9881C_COMMAND_INSTR(0x51, 0x23), + ILI9881C_COMMAND_INSTR(0x52, 0x45), + ILI9881C_COMMAND_INSTR(0x53, 0x67), + ILI9881C_COMMAND_INSTR(0x54, 0x89), + ILI9881C_COMMAND_INSTR(0x55, 0xab), + ILI9881C_COMMAND_INSTR(0x56, 0x01), + ILI9881C_COMMAND_INSTR(0x57, 0x23), + ILI9881C_COMMAND_INSTR(0x58, 0x45), + ILI9881C_COMMAND_INSTR(0x59, 0x67), + ILI9881C_COMMAND_INSTR(0x5a, 0x89), + ILI9881C_COMMAND_INSTR(0x5b, 0xab), + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), + ILI9881C_COMMAND_INSTR(0x5d, 0xef), + ILI9881C_COMMAND_INSTR(0x5e, 0x03), + ILI9881C_COMMAND_INSTR(0x5f, 0x14), + ILI9881C_COMMAND_INSTR(0x60, 0x15), + ILI9881C_COMMAND_INSTR(0x61, 0x0c), + ILI9881C_COMMAND_INSTR(0x62, 0x0d), + ILI9881C_COMMAND_INSTR(0x63, 0x0e), + ILI9881C_COMMAND_INSTR(0x64, 0x0f), + ILI9881C_COMMAND_INSTR(0x65, 0x10), + ILI9881C_COMMAND_INSTR(0x66, 0x11), + ILI9881C_COMMAND_INSTR(0x67, 0x08), + ILI9881C_COMMAND_INSTR(0x68, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x0a), + ILI9881C_COMMAND_INSTR(0x6a, 0x02), + ILI9881C_COMMAND_INSTR(0x6b, 0x02), + ILI9881C_COMMAND_INSTR(0x6c, 0x02), + ILI9881C_COMMAND_INSTR(0x6d, 0x02), + ILI9881C_COMMAND_INSTR(0x6e, 0x02), + ILI9881C_COMMAND_INSTR(0x6f, 0x02), + ILI9881C_COMMAND_INSTR(0x70, 0x02), + ILI9881C_COMMAND_INSTR(0x71, 0x02), + ILI9881C_COMMAND_INSTR(0x72, 0x06), + ILI9881C_COMMAND_INSTR(0x73, 0x02), + ILI9881C_COMMAND_INSTR(0x74, 0x02), + ILI9881C_COMMAND_INSTR(0x75, 0x14), + ILI9881C_COMMAND_INSTR(0x76, 0x15), + ILI9881C_COMMAND_INSTR(0x77, 0x0f), + ILI9881C_COMMAND_INSTR(0x78, 0x0e), + ILI9881C_COMMAND_INSTR(0x79, 0x0d), + ILI9881C_COMMAND_INSTR(0x7a, 0x0c), + ILI9881C_COMMAND_INSTR(0x7b, 0x11), + ILI9881C_COMMAND_INSTR(0x7c, 0x10), + ILI9881C_COMMAND_INSTR(0x7d, 0x06), + ILI9881C_COMMAND_INSTR(0x7e, 0x02), + ILI9881C_COMMAND_INSTR(0x7f, 0x0a), + ILI9881C_COMMAND_INSTR(0x80, 0x02), + ILI9881C_COMMAND_INSTR(0x81, 0x02), + ILI9881C_COMMAND_INSTR(0x82, 0x02), + ILI9881C_COMMAND_INSTR(0x83, 0x02), + ILI9881C_COMMAND_INSTR(0x84, 0x02), + ILI9881C_COMMAND_INSTR(0x85, 0x02), + ILI9881C_COMMAND_INSTR(0x86, 0x02), + ILI9881C_COMMAND_INSTR(0x87, 0x02), + ILI9881C_COMMAND_INSTR(0x88, 0x08), + ILI9881C_COMMAND_INSTR(0x89, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), + ILI9881C_SWITCH_PAGE_INSTR(4), + ILI9881C_COMMAND_INSTR(0x00, 0x80), + ILI9881C_COMMAND_INSTR(0x70, 0x00), + ILI9881C_COMMAND_INSTR(0x71, 0x00), + ILI9881C_COMMAND_INSTR(0x66, 0xfe), + ILI9881C_COMMAND_INSTR(0x82, 0x15), + ILI9881C_COMMAND_INSTR(0x84, 0x15), + 
ILI9881C_COMMAND_INSTR(0x85, 0x15), + ILI9881C_COMMAND_INSTR(0x3a, 0x24), + ILI9881C_COMMAND_INSTR(0x32, 0xac), + ILI9881C_COMMAND_INSTR(0x8c, 0x80), + ILI9881C_COMMAND_INSTR(0x3c, 0xf5), + ILI9881C_COMMAND_INSTR(0x88, 0x33), + ILI9881C_SWITCH_PAGE_INSTR(1), + ILI9881C_COMMAND_INSTR(0x22, 0x0a), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x53, 0x78), + ILI9881C_COMMAND_INSTR(0x55, 0x7b), + ILI9881C_COMMAND_INSTR(0x60, 0x20), + ILI9881C_COMMAND_INSTR(0x61, 0x00), + ILI9881C_COMMAND_INSTR(0x62, 0x0d), + ILI9881C_COMMAND_INSTR(0x63, 0x00), + ILI9881C_COMMAND_INSTR(0xa0, 0x00), + ILI9881C_COMMAND_INSTR(0xa1, 0x10), + ILI9881C_COMMAND_INSTR(0xa2, 0x1c), + ILI9881C_COMMAND_INSTR(0xa3, 0x13), + ILI9881C_COMMAND_INSTR(0xa4, 0x15), + ILI9881C_COMMAND_INSTR(0xa5, 0x26), + ILI9881C_COMMAND_INSTR(0xa6, 0x1a), + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), + ILI9881C_COMMAND_INSTR(0xa8, 0x67), + ILI9881C_COMMAND_INSTR(0xa9, 0x1c), + ILI9881C_COMMAND_INSTR(0xaa, 0x29), + ILI9881C_COMMAND_INSTR(0xab, 0x5b), + ILI9881C_COMMAND_INSTR(0xac, 0x26), + ILI9881C_COMMAND_INSTR(0xad, 0x28), + ILI9881C_COMMAND_INSTR(0xae, 0x5c), + ILI9881C_COMMAND_INSTR(0xaf, 0x30), + ILI9881C_COMMAND_INSTR(0xb0, 0x31), + ILI9881C_COMMAND_INSTR(0xb1, 0x32), + ILI9881C_COMMAND_INSTR(0xb2, 0x00), + ILI9881C_COMMAND_INSTR(0xb1, 0x2e), + ILI9881C_COMMAND_INSTR(0xb2, 0x32), + ILI9881C_COMMAND_INSTR(0xb3, 0x00), + ILI9881C_COMMAND_INSTR(0xb6, 0x02), + ILI9881C_COMMAND_INSTR(0xb7, 0x03), + ILI9881C_COMMAND_INSTR(0xc0, 0x00), + ILI9881C_COMMAND_INSTR(0xc1, 0x10), + ILI9881C_COMMAND_INSTR(0xc2, 0x1c), + ILI9881C_COMMAND_INSTR(0xc3, 0x13), + ILI9881C_COMMAND_INSTR(0xc4, 0x15), + ILI9881C_COMMAND_INSTR(0xc5, 0x26), + ILI9881C_COMMAND_INSTR(0xc6, 0x1a), + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), + ILI9881C_COMMAND_INSTR(0xc8, 0x67), + ILI9881C_COMMAND_INSTR(0xc9, 0x1c), + ILI9881C_COMMAND_INSTR(0xca, 0x29), + ILI9881C_COMMAND_INSTR(0xcb, 0x5b), + ILI9881C_COMMAND_INSTR(0xcc, 0x26), + ILI9881C_COMMAND_INSTR(0xcd, 0x28), + ILI9881C_COMMAND_INSTR(0xce, 0x5c), + ILI9881C_COMMAND_INSTR(0xcf, 0x30), + ILI9881C_COMMAND_INSTR(0xd0, 0x31), + ILI9881C_COMMAND_INSTR(0xd1, 0x2e), + ILI9881C_COMMAND_INSTR(0xd2, 0x32), + ILI9881C_COMMAND_INSTR(0xd3, 0x00), + ILI9881C_SWITCH_PAGE_INSTR(0), +}; + static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_SWITCH_PAGE_INSTR(3), ILI9881C_COMMAND_INSTR(0x01, 0x00), @@ -831,12 +1029,12 @@ static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_COMMAND_INSTR(0x07, 0x02), ILI9881C_COMMAND_INSTR(0x08, 0x02), ILI9881C_COMMAND_INSTR(0x09, 0x00), - ILI9881C_COMMAND_INSTR(0x0A, 0x00), - ILI9881C_COMMAND_INSTR(0x0B, 0x00), - ILI9881C_COMMAND_INSTR(0x0C, 0x00), - ILI9881C_COMMAND_INSTR(0x0D, 0x00), - ILI9881C_COMMAND_INSTR(0x0E, 0x00), - ILI9881C_COMMAND_INSTR(0x0F, 0x00), + ILI9881C_COMMAND_INSTR(0x0a, 0x00), + ILI9881C_COMMAND_INSTR(0x0b, 0x00), + ILI9881C_COMMAND_INSTR(0x0c, 0x00), + ILI9881C_COMMAND_INSTR(0x0d, 0x00), + ILI9881C_COMMAND_INSTR(0x0e, 0x00), + ILI9881C_COMMAND_INSTR(0x0f, 0x00), ILI9881C_COMMAND_INSTR(0x10, 0x00), ILI9881C_COMMAND_INSTR(0x11, 0x00), @@ -848,12 +1046,12 @@ static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_COMMAND_INSTR(0x17, 0x00), ILI9881C_COMMAND_INSTR(0x18, 0x08), ILI9881C_COMMAND_INSTR(0x19, 0x00), - ILI9881C_COMMAND_INSTR(0x1A, 0x00), - ILI9881C_COMMAND_INSTR(0x1B, 0x00), - ILI9881C_COMMAND_INSTR(0x1C, 0x00), - ILI9881C_COMMAND_INSTR(0x1D, 0x00), - ILI9881C_COMMAND_INSTR(0x1E, 0xC0), - ILI9881C_COMMAND_INSTR(0x1F, 0x80), + 
ILI9881C_COMMAND_INSTR(0x1a, 0x00), + ILI9881C_COMMAND_INSTR(0x1b, 0x00), + ILI9881C_COMMAND_INSTR(0x1c, 0x00), + ILI9881C_COMMAND_INSTR(0x1d, 0x00), + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), + ILI9881C_COMMAND_INSTR(0x1f, 0x80), ILI9881C_COMMAND_INSTR(0x20, 0x02), ILI9881C_COMMAND_INSTR(0x21, 0x09), @@ -865,12 +1063,12 @@ static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_COMMAND_INSTR(0x27, 0x00), ILI9881C_COMMAND_INSTR(0x28, 0x55), ILI9881C_COMMAND_INSTR(0x29, 0x03), - ILI9881C_COMMAND_INSTR(0x2A, 0x00), - ILI9881C_COMMAND_INSTR(0x2B, 0x00), - ILI9881C_COMMAND_INSTR(0x2C, 0x00), - ILI9881C_COMMAND_INSTR(0x2D, 0x00), - ILI9881C_COMMAND_INSTR(0x2E, 0x00), - ILI9881C_COMMAND_INSTR(0x2F, 0x00), + ILI9881C_COMMAND_INSTR(0x2a, 0x00), + ILI9881C_COMMAND_INSTR(0x2b, 0x00), + ILI9881C_COMMAND_INSTR(0x2c, 0x00), + ILI9881C_COMMAND_INSTR(0x2d, 0x00), + ILI9881C_COMMAND_INSTR(0x2e, 0x00), + ILI9881C_COMMAND_INSTR(0x2f, 0x00), ILI9881C_COMMAND_INSTR(0x30, 0x00), ILI9881C_COMMAND_INSTR(0x31, 0x00), @@ -880,54 +1078,54 @@ static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_COMMAND_INSTR(0x35, 0x05), ILI9881C_COMMAND_INSTR(0x36, 0x05), ILI9881C_COMMAND_INSTR(0x37, 0x00), - ILI9881C_COMMAND_INSTR(0x38, 0x3C), + ILI9881C_COMMAND_INSTR(0x38, 0x3c), ILI9881C_COMMAND_INSTR(0x39, 0x35), - ILI9881C_COMMAND_INSTR(0x3A, 0x00), - ILI9881C_COMMAND_INSTR(0x3B, 0x40), - ILI9881C_COMMAND_INSTR(0x3C, 0x00), - ILI9881C_COMMAND_INSTR(0x3D, 0x00), - ILI9881C_COMMAND_INSTR(0x3E, 0x00), - ILI9881C_COMMAND_INSTR(0x3F, 0x00), + ILI9881C_COMMAND_INSTR(0x3a, 0x00), + ILI9881C_COMMAND_INSTR(0x3b, 0x40), + ILI9881C_COMMAND_INSTR(0x3c, 0x00), + ILI9881C_COMMAND_INSTR(0x3d, 0x00), + ILI9881C_COMMAND_INSTR(0x3e, 0x00), + ILI9881C_COMMAND_INSTR(0x3f, 0x00), ILI9881C_COMMAND_INSTR(0x40, 0x00), ILI9881C_COMMAND_INSTR(0x41, 0x88), ILI9881C_COMMAND_INSTR(0x42, 0x00), ILI9881C_COMMAND_INSTR(0x43, 0x00), - ILI9881C_COMMAND_INSTR(0x44, 0x1F), + ILI9881C_COMMAND_INSTR(0x44, 0x1f), ILI9881C_COMMAND_INSTR(0x50, 0x01), ILI9881C_COMMAND_INSTR(0x51, 0x23), ILI9881C_COMMAND_INSTR(0x52, 0x45), ILI9881C_COMMAND_INSTR(0x53, 0x67), ILI9881C_COMMAND_INSTR(0x54, 0x89), - ILI9881C_COMMAND_INSTR(0x55, 0xaB), + ILI9881C_COMMAND_INSTR(0x55, 0xab), ILI9881C_COMMAND_INSTR(0x56, 0x01), ILI9881C_COMMAND_INSTR(0x57, 0x23), ILI9881C_COMMAND_INSTR(0x58, 0x45), ILI9881C_COMMAND_INSTR(0x59, 0x67), - ILI9881C_COMMAND_INSTR(0x5A, 0x89), - ILI9881C_COMMAND_INSTR(0x5B, 0xAB), - ILI9881C_COMMAND_INSTR(0x5C, 0xCD), - ILI9881C_COMMAND_INSTR(0x5D, 0xEF), - ILI9881C_COMMAND_INSTR(0x5E, 0x03), - ILI9881C_COMMAND_INSTR(0x5F, 0x14), + ILI9881C_COMMAND_INSTR(0x5a, 0x89), + ILI9881C_COMMAND_INSTR(0x5b, 0xab), + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), + ILI9881C_COMMAND_INSTR(0x5d, 0xef), + ILI9881C_COMMAND_INSTR(0x5e, 0x03), + ILI9881C_COMMAND_INSTR(0x5f, 0x14), ILI9881C_COMMAND_INSTR(0x60, 0x15), - ILI9881C_COMMAND_INSTR(0x61, 0x0C), - ILI9881C_COMMAND_INSTR(0x62, 0x0D), - ILI9881C_COMMAND_INSTR(0x63, 0x0E), - ILI9881C_COMMAND_INSTR(0x64, 0x0F), + ILI9881C_COMMAND_INSTR(0x61, 0x0c), + ILI9881C_COMMAND_INSTR(0x62, 0x0d), + ILI9881C_COMMAND_INSTR(0x63, 0x0e), + ILI9881C_COMMAND_INSTR(0x64, 0x0f), ILI9881C_COMMAND_INSTR(0x65, 0x10), ILI9881C_COMMAND_INSTR(0x66, 0x11), ILI9881C_COMMAND_INSTR(0x67, 0x08), ILI9881C_COMMAND_INSTR(0x68, 0x02), - ILI9881C_COMMAND_INSTR(0x69, 0x0A), - ILI9881C_COMMAND_INSTR(0x6A, 0x02), - ILI9881C_COMMAND_INSTR(0x6B, 0x02), - ILI9881C_COMMAND_INSTR(0x6C, 0x02), - ILI9881C_COMMAND_INSTR(0x6D, 0x02), - ILI9881C_COMMAND_INSTR(0x6E, 
0x02), - ILI9881C_COMMAND_INSTR(0x6F, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x0a), + ILI9881C_COMMAND_INSTR(0x6a, 0x02), + ILI9881C_COMMAND_INSTR(0x6b, 0x02), + ILI9881C_COMMAND_INSTR(0x6c, 0x02), + ILI9881C_COMMAND_INSTR(0x6d, 0x02), + ILI9881C_COMMAND_INSTR(0x6e, 0x02), + ILI9881C_COMMAND_INSTR(0x6f, 0x02), ILI9881C_COMMAND_INSTR(0x70, 0x02), ILI9881C_COMMAND_INSTR(0x71, 0x02), @@ -936,15 +1134,15 @@ static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_COMMAND_INSTR(0x74, 0x02), ILI9881C_COMMAND_INSTR(0x75, 0x14), ILI9881C_COMMAND_INSTR(0x76, 0x15), - ILI9881C_COMMAND_INSTR(0x77, 0x0F), - ILI9881C_COMMAND_INSTR(0x78, 0x0E), - ILI9881C_COMMAND_INSTR(0x79, 0x0D), - ILI9881C_COMMAND_INSTR(0x7A, 0x0C), - ILI9881C_COMMAND_INSTR(0x7B, 0x11), - ILI9881C_COMMAND_INSTR(0x7C, 0x10), - ILI9881C_COMMAND_INSTR(0x7D, 0x06), - ILI9881C_COMMAND_INSTR(0x7E, 0x02), - ILI9881C_COMMAND_INSTR(0x7F, 0x0A), + ILI9881C_COMMAND_INSTR(0x77, 0x0f), + ILI9881C_COMMAND_INSTR(0x78, 0x0e), + ILI9881C_COMMAND_INSTR(0x79, 0x0d), + ILI9881C_COMMAND_INSTR(0x7a, 0x0c), + ILI9881C_COMMAND_INSTR(0x7b, 0x11), + ILI9881C_COMMAND_INSTR(0x7c, 0x10), + ILI9881C_COMMAND_INSTR(0x7d, 0x06), + ILI9881C_COMMAND_INSTR(0x7e, 0x02), + ILI9881C_COMMAND_INSTR(0x7f, 0x0a), ILI9881C_COMMAND_INSTR(0x80, 0x02), ILI9881C_COMMAND_INSTR(0x81, 0x02), @@ -956,74 +1154,74 @@ static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_COMMAND_INSTR(0x87, 0x02), ILI9881C_COMMAND_INSTR(0x88, 0x08), ILI9881C_COMMAND_INSTR(0x89, 0x02), - ILI9881C_COMMAND_INSTR(0x8A, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), ILI9881C_COMMAND_INSTR(0x00, 0x80), ILI9881C_COMMAND_INSTR(0x70, 0x00), ILI9881C_COMMAND_INSTR(0x71, 0x00), - ILI9881C_COMMAND_INSTR(0x66, 0xFE), + ILI9881C_COMMAND_INSTR(0x66, 0xfe), ILI9881C_COMMAND_INSTR(0x82, 0x15), ILI9881C_COMMAND_INSTR(0x84, 0x15), ILI9881C_COMMAND_INSTR(0x85, 0x15), ILI9881C_COMMAND_INSTR(0x3a, 0x24), - ILI9881C_COMMAND_INSTR(0x32, 0xAC), - ILI9881C_COMMAND_INSTR(0x8C, 0x80), - ILI9881C_COMMAND_INSTR(0x3C, 0xF5), + ILI9881C_COMMAND_INSTR(0x32, 0xac), + ILI9881C_COMMAND_INSTR(0x8c, 0x80), + ILI9881C_COMMAND_INSTR(0x3c, 0xf5), ILI9881C_COMMAND_INSTR(0x88, 0x33), ILI9881C_SWITCH_PAGE_INSTR(1), - ILI9881C_COMMAND_INSTR(0x22, 0x0A), + ILI9881C_COMMAND_INSTR(0x22, 0x0a), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x53, 0x78), - ILI9881C_COMMAND_INSTR(0x50, 0x5B), - ILI9881C_COMMAND_INSTR(0x51, 0x5B), + ILI9881C_COMMAND_INSTR(0x50, 0x5b), + ILI9881C_COMMAND_INSTR(0x51, 0x5b), ILI9881C_COMMAND_INSTR(0x60, 0x20), ILI9881C_COMMAND_INSTR(0x61, 0x00), - ILI9881C_COMMAND_INSTR(0x62, 0x0D), + ILI9881C_COMMAND_INSTR(0x62, 0x0d), ILI9881C_COMMAND_INSTR(0x63, 0x00), - ILI9881C_COMMAND_INSTR(0xA0, 0x00), - ILI9881C_COMMAND_INSTR(0xA1, 0x10), - ILI9881C_COMMAND_INSTR(0xA2, 0x1C), - ILI9881C_COMMAND_INSTR(0xA3, 0x13), - ILI9881C_COMMAND_INSTR(0xA4, 0x15), - ILI9881C_COMMAND_INSTR(0xA5, 0x26), - ILI9881C_COMMAND_INSTR(0xA6, 0x1A), - ILI9881C_COMMAND_INSTR(0xA7, 0x1D), - ILI9881C_COMMAND_INSTR(0xA8, 0x67), - ILI9881C_COMMAND_INSTR(0xA9, 0x1C), - ILI9881C_COMMAND_INSTR(0xAA, 0x29), - ILI9881C_COMMAND_INSTR(0xAB, 0x5B), - ILI9881C_COMMAND_INSTR(0xAC, 0x26), - ILI9881C_COMMAND_INSTR(0xAD, 0x28), - ILI9881C_COMMAND_INSTR(0xAE, 0x5C), - ILI9881C_COMMAND_INSTR(0xAF, 0x30), - ILI9881C_COMMAND_INSTR(0xB0, 0x31), - ILI9881C_COMMAND_INSTR(0xB1, 0x2E), - ILI9881C_COMMAND_INSTR(0xB2, 0x32), - ILI9881C_COMMAND_INSTR(0xB3, 0x00), - - ILI9881C_COMMAND_INSTR(0xC0, 0x00), - 
ILI9881C_COMMAND_INSTR(0xC1, 0x10), - ILI9881C_COMMAND_INSTR(0xC2, 0x1C), - ILI9881C_COMMAND_INSTR(0xC3, 0x13), - ILI9881C_COMMAND_INSTR(0xC4, 0x15), - ILI9881C_COMMAND_INSTR(0xC5, 0x26), - ILI9881C_COMMAND_INSTR(0xC6, 0x1A), - ILI9881C_COMMAND_INSTR(0xC7, 0x1D), - ILI9881C_COMMAND_INSTR(0xC8, 0x67), - ILI9881C_COMMAND_INSTR(0xC9, 0x1C), - ILI9881C_COMMAND_INSTR(0xCA, 0x29), - ILI9881C_COMMAND_INSTR(0xCB, 0x5B), - ILI9881C_COMMAND_INSTR(0xCC, 0x26), - ILI9881C_COMMAND_INSTR(0xCD, 0x28), - ILI9881C_COMMAND_INSTR(0xCE, 0x5C), - ILI9881C_COMMAND_INSTR(0xCF, 0x30), - ILI9881C_COMMAND_INSTR(0xD0, 0x31), - ILI9881C_COMMAND_INSTR(0xD1, 0x2E), - ILI9881C_COMMAND_INSTR(0xD2, 0x32), - ILI9881C_COMMAND_INSTR(0xD3, 0x00), + ILI9881C_COMMAND_INSTR(0xa0, 0x00), + ILI9881C_COMMAND_INSTR(0xa1, 0x10), + ILI9881C_COMMAND_INSTR(0xa2, 0x1c), + ILI9881C_COMMAND_INSTR(0xa3, 0x13), + ILI9881C_COMMAND_INSTR(0xa4, 0x15), + ILI9881C_COMMAND_INSTR(0xa5, 0x26), + ILI9881C_COMMAND_INSTR(0xa6, 0x1a), + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), + ILI9881C_COMMAND_INSTR(0xa8, 0x67), + ILI9881C_COMMAND_INSTR(0xa9, 0x1c), + ILI9881C_COMMAND_INSTR(0xaa, 0x29), + ILI9881C_COMMAND_INSTR(0xab, 0x5b), + ILI9881C_COMMAND_INSTR(0xac, 0x26), + ILI9881C_COMMAND_INSTR(0xad, 0x28), + ILI9881C_COMMAND_INSTR(0xae, 0x5c), + ILI9881C_COMMAND_INSTR(0xaf, 0x30), + ILI9881C_COMMAND_INSTR(0xb0, 0x31), + ILI9881C_COMMAND_INSTR(0xb1, 0x2e), + ILI9881C_COMMAND_INSTR(0xb2, 0x32), + ILI9881C_COMMAND_INSTR(0xb3, 0x00), + + ILI9881C_COMMAND_INSTR(0xc0, 0x00), + ILI9881C_COMMAND_INSTR(0xc1, 0x10), + ILI9881C_COMMAND_INSTR(0xc2, 0x1c), + ILI9881C_COMMAND_INSTR(0xc3, 0x13), + ILI9881C_COMMAND_INSTR(0xc4, 0x15), + ILI9881C_COMMAND_INSTR(0xc5, 0x26), + ILI9881C_COMMAND_INSTR(0xc6, 0x1a), + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), + ILI9881C_COMMAND_INSTR(0xc8, 0x67), + ILI9881C_COMMAND_INSTR(0xc9, 0x1c), + ILI9881C_COMMAND_INSTR(0xca, 0x29), + ILI9881C_COMMAND_INSTR(0xcb, 0x5b), + ILI9881C_COMMAND_INSTR(0xcc, 0x26), + ILI9881C_COMMAND_INSTR(0xcd, 0x28), + ILI9881C_COMMAND_INSTR(0xce, 0x5c), + ILI9881C_COMMAND_INSTR(0xcf, 0x30), + ILI9881C_COMMAND_INSTR(0xd0, 0x31), + ILI9881C_COMMAND_INSTR(0xd1, 0x2e), + ILI9881C_COMMAND_INSTR(0xd2, 0x32), + ILI9881C_COMMAND_INSTR(0xd3, 0x00), ILI9881C_SWITCH_PAGE_INSTR(0), }; @@ -1032,10 +1230,10 @@ static const struct ili9881c_instr am8001280g_init[] = { ILI9881C_COMMAND_INSTR(0x01, 0x00), ILI9881C_COMMAND_INSTR(0x02, 0x00), ILI9881C_COMMAND_INSTR(0x03, 0x73), - ILI9881C_COMMAND_INSTR(0x04, 0xD3), + ILI9881C_COMMAND_INSTR(0x04, 0xd3), ILI9881C_COMMAND_INSTR(0x05, 0x00), - ILI9881C_COMMAND_INSTR(0x06, 0x0A), - ILI9881C_COMMAND_INSTR(0x07, 0x0E), + ILI9881C_COMMAND_INSTR(0x06, 0x0a), + ILI9881C_COMMAND_INSTR(0x07, 0x0e), ILI9881C_COMMAND_INSTR(0x08, 0x00), ILI9881C_COMMAND_INSTR(0x09, 0x01), ILI9881C_COMMAND_INSTR(0x0a, 0x01), @@ -1117,10 +1315,10 @@ static const struct ili9881c_instr am8001280g_init[] = { ILI9881C_COMMAND_INSTR(0x5f, 0x02), ILI9881C_COMMAND_INSTR(0x60, 0x00), ILI9881C_COMMAND_INSTR(0x61, 0x01), - ILI9881C_COMMAND_INSTR(0x62, 0x0D), - ILI9881C_COMMAND_INSTR(0x63, 0x0C), - ILI9881C_COMMAND_INSTR(0x64, 0x0F), - ILI9881C_COMMAND_INSTR(0x65, 0x0E), + ILI9881C_COMMAND_INSTR(0x62, 0x0d), + ILI9881C_COMMAND_INSTR(0x63, 0x0c), + ILI9881C_COMMAND_INSTR(0x64, 0x0f), + ILI9881C_COMMAND_INSTR(0x65, 0x0e), ILI9881C_COMMAND_INSTR(0x66, 0x06), ILI9881C_COMMAND_INSTR(0x67, 0x07), ILI9881C_COMMAND_INSTR(0x68, 0x02), @@ -1139,10 +1337,10 @@ static const struct ili9881c_instr am8001280g_init[] = { ILI9881C_COMMAND_INSTR(0x75, 0x02), 
ILI9881C_COMMAND_INSTR(0x76, 0x00), ILI9881C_COMMAND_INSTR(0x77, 0x01), - ILI9881C_COMMAND_INSTR(0x78, 0x0D), - ILI9881C_COMMAND_INSTR(0x79, 0x0C), - ILI9881C_COMMAND_INSTR(0x7a, 0x0F), - ILI9881C_COMMAND_INSTR(0x7b, 0x0E), + ILI9881C_COMMAND_INSTR(0x78, 0x0d), + ILI9881C_COMMAND_INSTR(0x79, 0x0c), + ILI9881C_COMMAND_INSTR(0x7a, 0x0f), + ILI9881C_COMMAND_INSTR(0x7b, 0x0e), ILI9881C_COMMAND_INSTR(0x7c, 0x06), ILI9881C_COMMAND_INSTR(0x7d, 0x07), ILI9881C_COMMAND_INSTR(0x7e, 0x02), @@ -1157,7 +1355,7 @@ static const struct ili9881c_instr am8001280g_init[] = { ILI9881C_COMMAND_INSTR(0x87, 0x02), ILI9881C_COMMAND_INSTR(0x88, 0x02), ILI9881C_COMMAND_INSTR(0x89, 0x02), - ILI9881C_COMMAND_INSTR(0x8A, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), ILI9881C_COMMAND_INSTR(0x6c, 0x15), @@ -1170,60 +1368,248 @@ static const struct ili9881c_instr am8001280g_init[] = { ILI9881C_COMMAND_INSTR(0xb2, 0xd1), ILI9881C_SWITCH_PAGE_INSTR(1), - ILI9881C_COMMAND_INSTR(0x22, 0x0A), - ILI9881C_COMMAND_INSTR(0x31, 0x0B), + ILI9881C_COMMAND_INSTR(0x22, 0x0a), + ILI9881C_COMMAND_INSTR(0x31, 0x0b), ILI9881C_COMMAND_INSTR(0x50, 0xa5), ILI9881C_COMMAND_INSTR(0x51, 0xa0), ILI9881C_COMMAND_INSTR(0x53, 0x70), - ILI9881C_COMMAND_INSTR(0x55, 0x7A), + ILI9881C_COMMAND_INSTR(0x55, 0x7a), ILI9881C_COMMAND_INSTR(0x60, 0x14), - ILI9881C_COMMAND_INSTR(0xA0, 0x00), - ILI9881C_COMMAND_INSTR(0xA1, 0x53), - ILI9881C_COMMAND_INSTR(0xA2, 0x50), - ILI9881C_COMMAND_INSTR(0xA3, 0x20), - ILI9881C_COMMAND_INSTR(0xA4, 0x27), - ILI9881C_COMMAND_INSTR(0xA5, 0x33), - ILI9881C_COMMAND_INSTR(0xA6, 0x25), - ILI9881C_COMMAND_INSTR(0xA7, 0x25), - ILI9881C_COMMAND_INSTR(0xA8, 0xD4), - ILI9881C_COMMAND_INSTR(0xA9, 0x1A), - ILI9881C_COMMAND_INSTR(0xAA, 0x2B), - ILI9881C_COMMAND_INSTR(0xAB, 0xB5), - ILI9881C_COMMAND_INSTR(0xAC, 0x19), - ILI9881C_COMMAND_INSTR(0xAD, 0x18), - ILI9881C_COMMAND_INSTR(0xAE, 0x53), - ILI9881C_COMMAND_INSTR(0xAF, 0x1A), - ILI9881C_COMMAND_INSTR(0xB0, 0x25), - ILI9881C_COMMAND_INSTR(0xB1, 0x62), - ILI9881C_COMMAND_INSTR(0xB2, 0x6A), - ILI9881C_COMMAND_INSTR(0xB3, 0x31), - - ILI9881C_COMMAND_INSTR(0xC0, 0x00), - ILI9881C_COMMAND_INSTR(0xC1, 0x53), - ILI9881C_COMMAND_INSTR(0xC2, 0x50), - ILI9881C_COMMAND_INSTR(0xC3, 0x20), - ILI9881C_COMMAND_INSTR(0xC4, 0x27), - ILI9881C_COMMAND_INSTR(0xC5, 0x33), - ILI9881C_COMMAND_INSTR(0xC6, 0x25), - ILI9881C_COMMAND_INSTR(0xC7, 0x25), - ILI9881C_COMMAND_INSTR(0xC8, 0xD4), - ILI9881C_COMMAND_INSTR(0xC9, 0x1A), - ILI9881C_COMMAND_INSTR(0xCA, 0x2B), - ILI9881C_COMMAND_INSTR(0xCB, 0xB5), - ILI9881C_COMMAND_INSTR(0xCC, 0x19), - ILI9881C_COMMAND_INSTR(0xCD, 0x18), - ILI9881C_COMMAND_INSTR(0xCE, 0x53), - ILI9881C_COMMAND_INSTR(0xCF, 0x1A), - ILI9881C_COMMAND_INSTR(0xD0, 0x25), - ILI9881C_COMMAND_INSTR(0xD1, 0x62), - ILI9881C_COMMAND_INSTR(0xD2, 0x6A), - ILI9881C_COMMAND_INSTR(0xD3, 0x31), + ILI9881C_COMMAND_INSTR(0xa0, 0x00), + ILI9881C_COMMAND_INSTR(0xa1, 0x53), + ILI9881C_COMMAND_INSTR(0xa2, 0x50), + ILI9881C_COMMAND_INSTR(0xa3, 0x20), + ILI9881C_COMMAND_INSTR(0xa4, 0x27), + ILI9881C_COMMAND_INSTR(0xa5, 0x33), + ILI9881C_COMMAND_INSTR(0xa6, 0x25), + ILI9881C_COMMAND_INSTR(0xa7, 0x25), + ILI9881C_COMMAND_INSTR(0xa8, 0xd4), + ILI9881C_COMMAND_INSTR(0xa9, 0x1a), + ILI9881C_COMMAND_INSTR(0xaa, 0x2b), + ILI9881C_COMMAND_INSTR(0xab, 0xb5), + ILI9881C_COMMAND_INSTR(0xac, 0x19), + ILI9881C_COMMAND_INSTR(0xad, 0x18), + ILI9881C_COMMAND_INSTR(0xae, 0x53), + ILI9881C_COMMAND_INSTR(0xaf, 0x1a), + ILI9881C_COMMAND_INSTR(0xb0, 0x25), + ILI9881C_COMMAND_INSTR(0xb1, 0x62), + 
ILI9881C_COMMAND_INSTR(0xb2, 0x6a), + ILI9881C_COMMAND_INSTR(0xb3, 0x31), + + ILI9881C_COMMAND_INSTR(0xc0, 0x00), + ILI9881C_COMMAND_INSTR(0xc1, 0x53), + ILI9881C_COMMAND_INSTR(0xc2, 0x50), + ILI9881C_COMMAND_INSTR(0xc3, 0x20), + ILI9881C_COMMAND_INSTR(0xc4, 0x27), + ILI9881C_COMMAND_INSTR(0xc5, 0x33), + ILI9881C_COMMAND_INSTR(0xc6, 0x25), + ILI9881C_COMMAND_INSTR(0xc7, 0x25), + ILI9881C_COMMAND_INSTR(0xc8, 0xd4), + ILI9881C_COMMAND_INSTR(0xc9, 0x1a), + ILI9881C_COMMAND_INSTR(0xca, 0x2b), + ILI9881C_COMMAND_INSTR(0xcb, 0xb5), + ILI9881C_COMMAND_INSTR(0xcc, 0x19), + ILI9881C_COMMAND_INSTR(0xcd, 0x18), + ILI9881C_COMMAND_INSTR(0xce, 0x53), + ILI9881C_COMMAND_INSTR(0xcf, 0x1a), + ILI9881C_COMMAND_INSTR(0xd0, 0x25), + ILI9881C_COMMAND_INSTR(0xd1, 0x62), + ILI9881C_COMMAND_INSTR(0xd2, 0x6a), + ILI9881C_COMMAND_INSTR(0xd3, 0x31), ILI9881C_SWITCH_PAGE_INSTR(0), ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c), ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00), }; +static const struct ili9881c_instr rpi_5inch_init[] = { + ILI9881C_SWITCH_PAGE_INSTR(3), + ILI9881C_COMMAND_INSTR(0x01, 0x00), + ILI9881C_COMMAND_INSTR(0x02, 0x00), + ILI9881C_COMMAND_INSTR(0x03, 0x73), + ILI9881C_COMMAND_INSTR(0x04, 0x73), + ILI9881C_COMMAND_INSTR(0x05, 0x00), + ILI9881C_COMMAND_INSTR(0x06, 0x06), + ILI9881C_COMMAND_INSTR(0x07, 0x02), + ILI9881C_COMMAND_INSTR(0x08, 0x00), + ILI9881C_COMMAND_INSTR(0x09, 0x01), + ILI9881C_COMMAND_INSTR(0x0a, 0x01), + ILI9881C_COMMAND_INSTR(0x0b, 0x01), + ILI9881C_COMMAND_INSTR(0x0c, 0x01), + ILI9881C_COMMAND_INSTR(0x0d, 0x01), + ILI9881C_COMMAND_INSTR(0x0e, 0x01), + ILI9881C_COMMAND_INSTR(0x0f, 0x01), + ILI9881C_COMMAND_INSTR(0x10, 0x01), + ILI9881C_COMMAND_INSTR(0x11, 0x00), + ILI9881C_COMMAND_INSTR(0x12, 0x00), + ILI9881C_COMMAND_INSTR(0x13, 0x01), + ILI9881C_COMMAND_INSTR(0x14, 0x00), + ILI9881C_COMMAND_INSTR(0x15, 0x00), + ILI9881C_COMMAND_INSTR(0x16, 0x00), + ILI9881C_COMMAND_INSTR(0x17, 0x00), + ILI9881C_COMMAND_INSTR(0x18, 0x00), + ILI9881C_COMMAND_INSTR(0x19, 0x00), + ILI9881C_COMMAND_INSTR(0x1a, 0x00), + ILI9881C_COMMAND_INSTR(0x1b, 0x00), + ILI9881C_COMMAND_INSTR(0x1c, 0x00), + ILI9881C_COMMAND_INSTR(0x1d, 0x00), + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), + ILI9881C_COMMAND_INSTR(0x1f, 0x80), + ILI9881C_COMMAND_INSTR(0x20, 0x04), + ILI9881C_COMMAND_INSTR(0x21, 0x03), + ILI9881C_COMMAND_INSTR(0x22, 0x00), + ILI9881C_COMMAND_INSTR(0x23, 0x00), + ILI9881C_COMMAND_INSTR(0x24, 0x00), + ILI9881C_COMMAND_INSTR(0x25, 0x00), + ILI9881C_COMMAND_INSTR(0x26, 0x00), + ILI9881C_COMMAND_INSTR(0x27, 0x00), + ILI9881C_COMMAND_INSTR(0x28, 0x33), + ILI9881C_COMMAND_INSTR(0x29, 0x03), + ILI9881C_COMMAND_INSTR(0x2a, 0x00), + ILI9881C_COMMAND_INSTR(0x2b, 0x00), + ILI9881C_COMMAND_INSTR(0x2c, 0x00), + ILI9881C_COMMAND_INSTR(0x2d, 0x00), + ILI9881C_COMMAND_INSTR(0x2e, 0x00), + ILI9881C_COMMAND_INSTR(0x2f, 0x00), + ILI9881C_COMMAND_INSTR(0x30, 0x00), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x33, 0x00), + ILI9881C_COMMAND_INSTR(0x34, 0x03), + ILI9881C_COMMAND_INSTR(0x35, 0x00), + ILI9881C_COMMAND_INSTR(0x36, 0x03), + ILI9881C_COMMAND_INSTR(0x37, 0x00), + ILI9881C_COMMAND_INSTR(0x38, 0x00), + ILI9881C_COMMAND_INSTR(0x39, 0x00), + ILI9881C_COMMAND_INSTR(0x3a, 0x00), + ILI9881C_COMMAND_INSTR(0x3b, 0x00), + ILI9881C_COMMAND_INSTR(0x3c, 0x00), + ILI9881C_COMMAND_INSTR(0x3d, 0x00), + ILI9881C_COMMAND_INSTR(0x3e, 0x00), + ILI9881C_COMMAND_INSTR(0x3f, 0x00), + ILI9881C_COMMAND_INSTR(0x40, 0x00), + ILI9881C_COMMAND_INSTR(0x41, 0x00), + 
ILI9881C_COMMAND_INSTR(0x42, 0x00), + ILI9881C_COMMAND_INSTR(0x43, 0x00), + ILI9881C_COMMAND_INSTR(0x44, 0x00), + ILI9881C_COMMAND_INSTR(0x50, 0x01), + ILI9881C_COMMAND_INSTR(0x51, 0x23), + ILI9881C_COMMAND_INSTR(0x52, 0x45), + ILI9881C_COMMAND_INSTR(0x53, 0x67), + ILI9881C_COMMAND_INSTR(0x54, 0x89), + ILI9881C_COMMAND_INSTR(0x55, 0xab), + ILI9881C_COMMAND_INSTR(0x56, 0x01), + ILI9881C_COMMAND_INSTR(0x57, 0x23), + ILI9881C_COMMAND_INSTR(0x58, 0x45), + ILI9881C_COMMAND_INSTR(0x59, 0x67), + ILI9881C_COMMAND_INSTR(0x5a, 0x89), + ILI9881C_COMMAND_INSTR(0x5b, 0xab), + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), + ILI9881C_COMMAND_INSTR(0x5d, 0xef), + ILI9881C_COMMAND_INSTR(0x5e, 0x10), + ILI9881C_COMMAND_INSTR(0x5f, 0x09), + ILI9881C_COMMAND_INSTR(0x60, 0x08), + ILI9881C_COMMAND_INSTR(0x61, 0x0f), + ILI9881C_COMMAND_INSTR(0x62, 0x0e), + ILI9881C_COMMAND_INSTR(0x63, 0x0d), + ILI9881C_COMMAND_INSTR(0x64, 0x0c), + ILI9881C_COMMAND_INSTR(0x65, 0x02), + ILI9881C_COMMAND_INSTR(0x66, 0x02), + ILI9881C_COMMAND_INSTR(0x67, 0x02), + ILI9881C_COMMAND_INSTR(0x68, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x02), + ILI9881C_COMMAND_INSTR(0x6a, 0x02), + ILI9881C_COMMAND_INSTR(0x6b, 0x02), + ILI9881C_COMMAND_INSTR(0x6c, 0x02), + ILI9881C_COMMAND_INSTR(0x6d, 0x02), + ILI9881C_COMMAND_INSTR(0x6e, 0x02), + ILI9881C_COMMAND_INSTR(0x6f, 0x02), + ILI9881C_COMMAND_INSTR(0x70, 0x02), + ILI9881C_COMMAND_INSTR(0x71, 0x06), + ILI9881C_COMMAND_INSTR(0x72, 0x07), + ILI9881C_COMMAND_INSTR(0x73, 0x02), + ILI9881C_COMMAND_INSTR(0x74, 0x02), + ILI9881C_COMMAND_INSTR(0x75, 0x06), + ILI9881C_COMMAND_INSTR(0x76, 0x07), + ILI9881C_COMMAND_INSTR(0x77, 0x0e), + ILI9881C_COMMAND_INSTR(0x78, 0x0f), + ILI9881C_COMMAND_INSTR(0x79, 0x0c), + ILI9881C_COMMAND_INSTR(0x7a, 0x0d), + ILI9881C_COMMAND_INSTR(0x7b, 0x02), + ILI9881C_COMMAND_INSTR(0x7c, 0x02), + ILI9881C_COMMAND_INSTR(0x7d, 0x02), + ILI9881C_COMMAND_INSTR(0x7e, 0x02), + ILI9881C_COMMAND_INSTR(0x7f, 0x02), + ILI9881C_COMMAND_INSTR(0x80, 0x02), + ILI9881C_COMMAND_INSTR(0x81, 0x02), + ILI9881C_COMMAND_INSTR(0x82, 0x02), + ILI9881C_COMMAND_INSTR(0x83, 0x02), + ILI9881C_COMMAND_INSTR(0x84, 0x02), + ILI9881C_COMMAND_INSTR(0x85, 0x02), + ILI9881C_COMMAND_INSTR(0x86, 0x02), + ILI9881C_COMMAND_INSTR(0x87, 0x09), + ILI9881C_COMMAND_INSTR(0x88, 0x08), + ILI9881C_COMMAND_INSTR(0x89, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), + ILI9881C_SWITCH_PAGE_INSTR(4), + ILI9881C_COMMAND_INSTR(0x6c, 0x15), + ILI9881C_COMMAND_INSTR(0x6e, 0x2a), + ILI9881C_COMMAND_INSTR(0x6f, 0x57), + ILI9881C_COMMAND_INSTR(0x3a, 0xa4), + ILI9881C_COMMAND_INSTR(0x8d, 0x1a), + ILI9881C_COMMAND_INSTR(0x87, 0xba), + ILI9881C_COMMAND_INSTR(0x26, 0x76), + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), + ILI9881C_SWITCH_PAGE_INSTR(1), + ILI9881C_COMMAND_INSTR(0x22, 0x0a), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x53, 0x35), + ILI9881C_COMMAND_INSTR(0x55, 0x50), + ILI9881C_COMMAND_INSTR(0x50, 0xaf), + ILI9881C_COMMAND_INSTR(0x51, 0xaf), + ILI9881C_COMMAND_INSTR(0x60, 0x14), + ILI9881C_COMMAND_INSTR(0xa0, 0x08), + ILI9881C_COMMAND_INSTR(0xa1, 0x1d), + ILI9881C_COMMAND_INSTR(0xa2, 0x2c), + ILI9881C_COMMAND_INSTR(0xa3, 0x14), + ILI9881C_COMMAND_INSTR(0xa4, 0x19), + ILI9881C_COMMAND_INSTR(0xa5, 0x2e), + ILI9881C_COMMAND_INSTR(0xa6, 0x22), + ILI9881C_COMMAND_INSTR(0xa7, 0x23), + ILI9881C_COMMAND_INSTR(0xa8, 0x97), + ILI9881C_COMMAND_INSTR(0xa9, 0x1e), + ILI9881C_COMMAND_INSTR(0xaa, 0x29), + ILI9881C_COMMAND_INSTR(0xab, 0x7b), + ILI9881C_COMMAND_INSTR(0xac, 0x18), + ILI9881C_COMMAND_INSTR(0xad, 0x17), + ILI9881C_COMMAND_INSTR(0xae, 
0x4b), + ILI9881C_COMMAND_INSTR(0xaf, 0x1f), + ILI9881C_COMMAND_INSTR(0xb0, 0x27), + ILI9881C_COMMAND_INSTR(0xb1, 0x52), + ILI9881C_COMMAND_INSTR(0xb2, 0x63), + ILI9881C_COMMAND_INSTR(0xb3, 0x39), + ILI9881C_COMMAND_INSTR(0xc0, 0x08), + ILI9881C_COMMAND_INSTR(0xc1, 0x1d), + ILI9881C_COMMAND_INSTR(0xc2, 0x2c), + ILI9881C_COMMAND_INSTR(0xc3, 0x14), + ILI9881C_COMMAND_INSTR(0xc4, 0x19), + ILI9881C_COMMAND_INSTR(0xc5, 0x2e), + ILI9881C_COMMAND_INSTR(0xc6, 0x22), + ILI9881C_COMMAND_INSTR(0xc7, 0x23), + ILI9881C_COMMAND_INSTR(0xc8, 0x97), + ILI9881C_COMMAND_INSTR(0xc9, 0x1e), + ILI9881C_COMMAND_INSTR(0xca, 0x29), + ILI9881C_COMMAND_INSTR(0xcb, 0x7b), + ILI9881C_COMMAND_INSTR(0xcc, 0x18), + ILI9881C_COMMAND_INSTR(0xcd, 0x17), + ILI9881C_COMMAND_INSTR(0xce, 0x4b), + ILI9881C_COMMAND_INSTR(0xcf, 0x1f), + ILI9881C_COMMAND_INSTR(0xd0, 0x27), + ILI9881C_COMMAND_INSTR(0xd1, 0x52), + ILI9881C_COMMAND_INSTR(0xd2, 0x63), + ILI9881C_COMMAND_INSTR(0xd3, 0x39), +}; + static const struct ili9881c_instr rpi_7inch_init[] = { ILI9881C_SWITCH_PAGE_INSTR(3), ILI9881C_COMMAND_INSTR(0x01, 0x00), @@ -1352,22 +1738,22 @@ static const struct ili9881c_instr rpi_7inch_init[] = { ILI9881C_COMMAND_INSTR(0x87, 0x02), ILI9881C_COMMAND_INSTR(0x88, 0x02), ILI9881C_COMMAND_INSTR(0x89, 0x02), - ILI9881C_COMMAND_INSTR(0x8A, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), - ILI9881C_COMMAND_INSTR(0x6C, 0x15), - ILI9881C_COMMAND_INSTR(0x6E, 0x2A), - ILI9881C_COMMAND_INSTR(0x6F, 0x33), - ILI9881C_COMMAND_INSTR(0x3B, 0x98), + ILI9881C_COMMAND_INSTR(0x6c, 0x15), + ILI9881C_COMMAND_INSTR(0x6e, 0x2a), + ILI9881C_COMMAND_INSTR(0x6f, 0x33), + ILI9881C_COMMAND_INSTR(0x3b, 0x98), ILI9881C_COMMAND_INSTR(0x3a, 0x94), - ILI9881C_COMMAND_INSTR(0x8D, 0x14), - ILI9881C_COMMAND_INSTR(0x87, 0xBA), + ILI9881C_COMMAND_INSTR(0x8d, 0x14), + ILI9881C_COMMAND_INSTR(0x87, 0xba), ILI9881C_COMMAND_INSTR(0x26, 0x76), - ILI9881C_COMMAND_INSTR(0xB2, 0xD1), - ILI9881C_COMMAND_INSTR(0xB5, 0x06), + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), + ILI9881C_COMMAND_INSTR(0xb5, 0x06), ILI9881C_COMMAND_INSTR(0x38, 0x01), ILI9881C_COMMAND_INSTR(0x39, 0x00), ILI9881C_SWITCH_PAGE_INSTR(1), - ILI9881C_COMMAND_INSTR(0x22, 0x0A), + ILI9881C_COMMAND_INSTR(0x22, 0x0a), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x53, 0x7d), ILI9881C_COMMAND_INSTR(0x55, 0x8f), @@ -1375,46 +1761,46 @@ static const struct ili9881c_instr rpi_7inch_init[] = { ILI9881C_COMMAND_INSTR(0x50, 0x96), ILI9881C_COMMAND_INSTR(0x51, 0x96), ILI9881C_COMMAND_INSTR(0x60, 0x23), - ILI9881C_COMMAND_INSTR(0xA0, 0x08), - ILI9881C_COMMAND_INSTR(0xA1, 0x1d), - ILI9881C_COMMAND_INSTR(0xA2, 0x2a), - ILI9881C_COMMAND_INSTR(0xA3, 0x10), - ILI9881C_COMMAND_INSTR(0xA4, 0x15), - ILI9881C_COMMAND_INSTR(0xA5, 0x28), - ILI9881C_COMMAND_INSTR(0xA6, 0x1c), - ILI9881C_COMMAND_INSTR(0xA7, 0x1d), - ILI9881C_COMMAND_INSTR(0xA8, 0x7e), - ILI9881C_COMMAND_INSTR(0xA9, 0x1d), - ILI9881C_COMMAND_INSTR(0xAA, 0x29), - ILI9881C_COMMAND_INSTR(0xAB, 0x6b), - ILI9881C_COMMAND_INSTR(0xAC, 0x1a), - ILI9881C_COMMAND_INSTR(0xAD, 0x18), - ILI9881C_COMMAND_INSTR(0xAE, 0x4b), - ILI9881C_COMMAND_INSTR(0xAF, 0x20), - ILI9881C_COMMAND_INSTR(0xB0, 0x27), - ILI9881C_COMMAND_INSTR(0xB1, 0x50), - ILI9881C_COMMAND_INSTR(0xB2, 0x64), - ILI9881C_COMMAND_INSTR(0xB3, 0x39), - ILI9881C_COMMAND_INSTR(0xC0, 0x08), - ILI9881C_COMMAND_INSTR(0xC1, 0x1d), - ILI9881C_COMMAND_INSTR(0xC2, 0x2a), - ILI9881C_COMMAND_INSTR(0xC3, 0x10), - ILI9881C_COMMAND_INSTR(0xC4, 0x15), - ILI9881C_COMMAND_INSTR(0xC5, 0x28), - ILI9881C_COMMAND_INSTR(0xC6, 
0x1c), - ILI9881C_COMMAND_INSTR(0xC7, 0x1d), - ILI9881C_COMMAND_INSTR(0xC8, 0x7e), - ILI9881C_COMMAND_INSTR(0xC9, 0x1d), - ILI9881C_COMMAND_INSTR(0xCA, 0x29), - ILI9881C_COMMAND_INSTR(0xCB, 0x6b), - ILI9881C_COMMAND_INSTR(0xCC, 0x1a), - ILI9881C_COMMAND_INSTR(0xCD, 0x18), - ILI9881C_COMMAND_INSTR(0xCE, 0x4b), - ILI9881C_COMMAND_INSTR(0xCF, 0x20), - ILI9881C_COMMAND_INSTR(0xD0, 0x27), - ILI9881C_COMMAND_INSTR(0xD1, 0x50), - ILI9881C_COMMAND_INSTR(0xD2, 0x64), - ILI9881C_COMMAND_INSTR(0xD3, 0x39), + ILI9881C_COMMAND_INSTR(0xa0, 0x08), + ILI9881C_COMMAND_INSTR(0xa1, 0x1d), + ILI9881C_COMMAND_INSTR(0xa2, 0x2a), + ILI9881C_COMMAND_INSTR(0xa3, 0x10), + ILI9881C_COMMAND_INSTR(0xa4, 0x15), + ILI9881C_COMMAND_INSTR(0xa5, 0x28), + ILI9881C_COMMAND_INSTR(0xa6, 0x1c), + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), + ILI9881C_COMMAND_INSTR(0xa8, 0x7e), + ILI9881C_COMMAND_INSTR(0xa9, 0x1d), + ILI9881C_COMMAND_INSTR(0xaa, 0x29), + ILI9881C_COMMAND_INSTR(0xab, 0x6b), + ILI9881C_COMMAND_INSTR(0xac, 0x1a), + ILI9881C_COMMAND_INSTR(0xad, 0x18), + ILI9881C_COMMAND_INSTR(0xae, 0x4b), + ILI9881C_COMMAND_INSTR(0xaf, 0x20), + ILI9881C_COMMAND_INSTR(0xb0, 0x27), + ILI9881C_COMMAND_INSTR(0xb1, 0x50), + ILI9881C_COMMAND_INSTR(0xb2, 0x64), + ILI9881C_COMMAND_INSTR(0xb3, 0x39), + ILI9881C_COMMAND_INSTR(0xc0, 0x08), + ILI9881C_COMMAND_INSTR(0xc1, 0x1d), + ILI9881C_COMMAND_INSTR(0xc2, 0x2a), + ILI9881C_COMMAND_INSTR(0xc3, 0x10), + ILI9881C_COMMAND_INSTR(0xc4, 0x15), + ILI9881C_COMMAND_INSTR(0xc5, 0x28), + ILI9881C_COMMAND_INSTR(0xc6, 0x1c), + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), + ILI9881C_COMMAND_INSTR(0xc8, 0x7e), + ILI9881C_COMMAND_INSTR(0xc9, 0x1d), + ILI9881C_COMMAND_INSTR(0xca, 0x29), + ILI9881C_COMMAND_INSTR(0xcb, 0x6b), + ILI9881C_COMMAND_INSTR(0xcc, 0x1a), + ILI9881C_COMMAND_INSTR(0xcd, 0x18), + ILI9881C_COMMAND_INSTR(0xce, 0x4b), + ILI9881C_COMMAND_INSTR(0xcf, 0x20), + ILI9881C_COMMAND_INSTR(0xd0, 0x27), + ILI9881C_COMMAND_INSTR(0xd1, 0x50), + ILI9881C_COMMAND_INSTR(0xd2, 0x64), + ILI9881C_COMMAND_INSTR(0xd3, 0x39), }; static const struct ili9881c_instr bsd1218_a101kl68_init[] = { @@ -1772,6 +2158,23 @@ static const struct drm_display_mode tl050hdv35_default_mode = { .height_mm = 110, }; +static const struct drm_display_mode w552946aaa_default_mode = { + .clock = 65000, + + .hdisplay = 720, + .hsync_start = 720 + 52, + .hsync_end = 720 + 52 + 8, + .htotal = 720 + 52 + 8 + 48, + + .vdisplay = 1280, + .vsync_start = 1280 + 16, + .vsync_end = 1280 + 16 + 6, + .vtotal = 1280 + 16 + 6 + 15, + + .width_mm = 68, + .height_mm = 121, +}; + static const struct drm_display_mode w552946aba_default_mode = { .clock = 64000, @@ -1806,6 +2209,23 @@ static const struct drm_display_mode am8001280g_default_mode = { .height_mm = 151, }; +static const struct drm_display_mode rpi_5inch_default_mode = { + .clock = 83333, + + .hdisplay = 720, + .hsync_start = 720 + 110, + .hsync_end = 720 + 110 + 12, + .htotal = 720 + 110 + 12 + 95, + + .vdisplay = 1280, + .vsync_start = 1280 + 100, + .vsync_end = 1280 + 100 + 2, + .vtotal = 1280 + 100 + 2 + 100, + + .width_mm = 62, + .height_mm = 110, +}; + static const struct drm_display_mode rpi_7inch_default_mode = { .clock = 83330, @@ -1983,6 +2403,15 @@ static const struct ili9881c_desc tl050hdv35_desc = { .default_address_mode = 0x03, }; +static const struct ili9881c_desc w552946aaa_desc = { + .init = w552946aaa_init, + .init_length = ARRAY_SIZE(w552946aaa_init), + .mode = &w552946aaa_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | 
MIPI_DSI_MODE_NO_EOT_PACKET, + .lanes = 2, +}; + static const struct ili9881c_desc w552946aba_desc = { .init = w552946ab_init, .init_length = ARRAY_SIZE(w552946ab_init), @@ -2000,6 +2429,14 @@ static const struct ili9881c_desc am8001280g_desc = { MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM, }; +static const struct ili9881c_desc rpi_5inch_desc = { + .init = rpi_5inch_init, + .init_length = ARRAY_SIZE(rpi_5inch_init), + .mode = &rpi_5inch_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM, + .lanes = 2, +}; + static const struct ili9881c_desc rpi_7inch_desc = { .init = rpi_7inch_init, .init_length = ARRAY_SIZE(rpi_7inch_init), @@ -2023,8 +2460,10 @@ static const struct of_device_id ili9881c_of_match[] = { { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, { .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc }, { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc }, + { .compatible = "wanchanglong,w552946aaa", .data = &w552946aaa_desc }, { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, { .compatible = "ampire,am8001280g", .data = &am8001280g_desc }, + { .compatible = "raspberrypi,dsi-5inch", &rpi_5inch_desc }, { .compatible = "raspberrypi,dsi-7inch", &rpi_7inch_desc }, { } }; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c index 85c7059be214..c52f20863fc7 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c @@ -61,6 +61,13 @@ struct ili9882t { mipi_dsi_dcs_write_seq_multi(ctx, ILI9882T_DCS_SWITCH_PAGE, \ 0x98, 0x82, (page)) +/* IL79900A-specific commands, add new commands as you decode them */ +#define IL79900A_DCS_SWITCH_PAGE 0xFF + +#define il79900a_switch_page(ctx, page) \ + mipi_dsi_dcs_write_seq_multi(ctx, IL79900A_DCS_SWITCH_PAGE, \ + 0x5a, 0xa5, (page)) + static int starry_ili9882t_init(struct ili9882t *ili) { struct mipi_dsi_multi_context ctx = { .dsi = ili->dsi }; @@ -413,6 +420,38 @@ static int starry_ili9882t_init(struct ili9882t *ili) return ctx.accum_err; }; +static int tianma_il79900a_init(struct ili9882t *ili) +{ + struct mipi_dsi_multi_context ctx = { .dsi = ili->dsi }; + + mipi_dsi_usleep_range(&ctx, 5000, 5100); + + il79900a_switch_page(&ctx, 0x06); + mipi_dsi_dcs_write_seq_multi(&ctx, 0x3e, 0x62); + + il79900a_switch_page(&ctx, 0x02); + mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x20); + mipi_dsi_dcs_write_seq_multi(&ctx, 0x5d, 0x00); + mipi_dsi_dcs_write_seq_multi(&ctx, 0x5e, 0x40); + + il79900a_switch_page(&ctx, 0x07); + mipi_dsi_dcs_write_seq_multi(&ctx, 0X29, 0x00); + + il79900a_switch_page(&ctx, 0x06); + mipi_dsi_dcs_write_seq_multi(&ctx, 0x92, 0x22); + + il79900a_switch_page(&ctx, 0x00); + mipi_dsi_dcs_exit_sleep_mode_multi(&ctx); + + mipi_dsi_msleep(&ctx, 120); + + mipi_dsi_dcs_set_display_on_multi(&ctx); + + mipi_dsi_msleep(&ctx, 80); + + return 0; +}; + static inline struct ili9882t *to_ili9882t(struct drm_panel *panel) { return container_of(panel, struct ili9882t, base); @@ -529,6 +568,19 @@ static const struct drm_display_mode starry_ili9882t_default_mode = { .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }; +static const struct drm_display_mode tianma_il79900a_default_mode = { + .clock = 264355, + .hdisplay = 1600, + .hsync_start = 1600 + 20, + .hsync_end = 1600 + 20 + 4, + .htotal = 1600 + 20 + 4 + 20, + .vdisplay = 2560, + .vsync_start = 2560 + 82, + .vsync_end = 2560 + 82 + 2, + .vtotal = 2560 + 82 + 2 + 36, + .type = DRM_MODE_TYPE_DRIVER | 
DRM_MODE_TYPE_PREFERRED, +}; + static const struct panel_desc starry_ili9882t_desc = { .modes = &starry_ili9882t_default_mode, .bpc = 8, @@ -543,6 +595,20 @@ static const struct panel_desc starry_ili9882t_desc = { .init = starry_ili9882t_init, }; +static const struct panel_desc tianma_tl121bvms07_desc = { + .modes = &tianma_il79900a_default_mode, + .bpc = 8, + .size = { + .width_mm = 163, + .height_mm = 260, + }, + .lanes = 3, + .format = MIPI_DSI_FMT_RGB888, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_LPM, + .init = tianma_il79900a_init, +}; + static int ili9882t_get_modes(struct drm_panel *panel, struct drm_connector *connector) { @@ -680,6 +746,9 @@ static const struct of_device_id ili9882t_of_match[] = { { .compatible = "starry,ili9882t", .data = &starry_ili9882t_desc }, + { .compatible = "tianma,tl121bvms07-00", + .data = &tianma_tl121bvms07_desc + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ili9882t_of_match); diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c index 5c2530598ddb..aa05316dc57b 100644 --- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -1132,22 +1132,19 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi) dsi->lanes = desc->lanes; jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(jadard->reset)) { - DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n"); - return PTR_ERR(jadard->reset); - } + if (IS_ERR(jadard->reset)) + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->reset), + "failed to get our reset GPIO\n"); jadard->vdd = devm_regulator_get(dev, "vdd"); - if (IS_ERR(jadard->vdd)) { - DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n"); - return PTR_ERR(jadard->vdd); - } + if (IS_ERR(jadard->vdd)) + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vdd), + "failed to get vdd regulator\n"); jadard->vccio = devm_regulator_get(dev, "vccio"); - if (IS_ERR(jadard->vccio)) { - DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n"); - return PTR_ERR(jadard->vccio); - } + if (IS_ERR(jadard->vccio)) + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vccio), + "failed to get vccio regulator\n"); ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-lg-ld070wx3.c b/drivers/gpu/drm/panel/panel-lg-ld070wx3.c new file mode 100644 index 000000000000..00cbfc5518a5 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-lg-ld070wx3.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/array_size.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +static const struct regulator_bulk_data lg_ld070wx3_supplies[] = { + { .supply = "vdd" }, { .supply = "vcc" }, +}; + +struct lg_ld070wx3 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + + struct regulator_bulk_data *supplies; +}; + +static inline struct lg_ld070wx3 *to_lg_ld070wx3(struct drm_panel *panel) +{ + return container_of(panel, struct lg_ld070wx3, panel); +} + +static int lg_ld070wx3_prepare(struct drm_panel *panel) +{ + struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel); + struct 
mipi_dsi_multi_context ctx = { .dsi = priv->dsi }; + struct device *dev = panel->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies); + if (ret < 0) { + dev_err(dev, "failed to enable power supplies: %d\n", ret); + return ret; + } + + /* + * According to spec delay between enabling supply is 0, + * for regulators to reach required voltage ~5ms needed. + * MIPI interface signal for setup requires additional + * 110ms which in total results in 115ms. + */ + mdelay(115); + + mipi_dsi_dcs_soft_reset_multi(&ctx); + mipi_dsi_msleep(&ctx, 20); + + /* Differential input impedance selection */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xae, 0x0b); + + /* Enter test mode 1 and 2*/ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0xea); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x5f); + + /* Increased MIPI CLK driving ability */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x68); + + /* Exit test mode 1 and 2 */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0x00); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x00); + + return ctx.accum_err; +} + +static int lg_ld070wx3_unprepare(struct drm_panel *panel) +{ + struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel); + struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi }; + + mipi_dsi_dcs_enter_sleep_mode_multi(&ctx); + + msleep(50); + + regulator_bulk_disable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies); + + /* power supply must be off for at least 1s after panel disable */ + msleep(1000); + + return 0; +} + +static const struct drm_display_mode lg_ld070wx3_mode = { + .clock = (800 + 32 + 48 + 8) * (1280 + 5 + 3 + 1) * 60 / 1000, + .hdisplay = 800, + .hsync_start = 800 + 32, + .hsync_end = 800 + 32 + 48, + .htotal = 800 + 32 + 48 + 8, + .vdisplay = 1280, + .vsync_start = 1280 + 5, + .vsync_end = 1280 + 5 + 3, + .vtotal = 1280 + 5 + 3 + 1, + .width_mm = 94, + .height_mm = 151, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static int lg_ld070wx3_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &lg_ld070wx3_mode); +} + +static const struct drm_panel_funcs lg_ld070wx3_panel_funcs = { + .prepare = lg_ld070wx3_prepare, + .unprepare = lg_ld070wx3_unprepare, + .get_modes = lg_ld070wx3_get_modes, +}; + +static int lg_ld070wx3_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct lg_ld070wx3 *priv; + int ret; + + priv = devm_drm_panel_alloc(dev, struct lg_ld070wx3, panel, + &lg_ld070wx3_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(lg_ld070wx3_supplies), + lg_ld070wx3_supplies, &priv->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get supplies\n"); + + priv->dsi = dsi; + mipi_dsi_set_drvdata(dsi, priv); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM; + + ret = drm_panel_of_backlight(&priv->panel); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get backlight\n"); + + drm_panel_add(&priv->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&priv->panel); + return dev_err_probe(dev, ret, "failed to attach to DSI host\n"); + } + + return 0; +} + +static void lg_ld070wx3_remove(struct mipi_dsi_device *dsi) +{ + struct lg_ld070wx3 *priv = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&priv->panel); +} + +static const struct of_device_id lg_ld070wx3_of_match[] = { + { .compatible = 
"lg,ld070wx3-sl01" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, lg_ld070wx3_of_match); + +static struct mipi_dsi_driver lg_ld070wx3_driver = { + .driver = { + .name = "panel-lg-ld070wx3", + .of_match_table = lg_ld070wx3_of_match, + }, + .probe = lg_ld070wx3_probe, + .remove = lg_ld070wx3_remove, +}; +module_mipi_dsi_driver(lg_ld070wx3_driver); + +MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>"); +MODULE_DESCRIPTION("LG LD070WX3-SL01 DSI panel driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c index 0db9cadd868e..18130bc14201 100644 --- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c +++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c @@ -43,59 +43,12 @@ struct nv3052c { struct gpio_desc *reset_gpio; }; -static const struct nv3052c_reg ltk035c5444t_panel_regs[] = { - // EXTC Command set enable, select page 1 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x01 }, - // Mostly unknown registers - { 0xe3, 0x00 }, - { 0x40, 0x00 }, - { 0x03, 0x40 }, - { 0x04, 0x00 }, - { 0x05, 0x03 }, - { 0x08, 0x00 }, - { 0x09, 0x07 }, - { 0x0a, 0x01 }, - { 0x0b, 0x32 }, - { 0x0c, 0x32 }, - { 0x0d, 0x0b }, - { 0x0e, 0x00 }, - { 0x23, 0xa0 }, - { 0x24, 0x0c }, - { 0x25, 0x06 }, - { 0x26, 0x14 }, - { 0x27, 0x14 }, - { 0x38, 0xcc }, // VCOM_ADJ1 - { 0x39, 0xd7 }, // VCOM_ADJ2 - { 0x3a, 0x4a }, // VCOM_ADJ3 - { 0x28, 0x40 }, - { 0x29, 0x01 }, - { 0x2a, 0xdf }, - { 0x49, 0x3c }, - { 0x91, 0x77 }, // EXTPW_CTRL2 - { 0x92, 0x77 }, // EXTPW_CTRL3 - { 0xa0, 0x55 }, - { 0xa1, 0x50 }, - { 0xa4, 0x9c }, - { 0xa7, 0x02 }, - { 0xa8, 0x01 }, - { 0xa9, 0x01 }, - { 0xaa, 0xfc }, - { 0xab, 0x28 }, - { 0xac, 0x06 }, - { 0xad, 0x06 }, - { 0xae, 0x06 }, - { 0xaf, 0x03 }, - { 0xb0, 0x08 }, - { 0xb1, 0x26 }, - { 0xb2, 0x28 }, - { 0xb3, 0x28 }, - { 0xb4, 0x33 }, - { 0xb5, 0x08 }, - { 0xb6, 0x26 }, - { 0xb7, 0x08 }, - { 0xb8, 0x26 }, - { 0xf0, 0x00 }, - { 0xf6, 0xc0 }, +/* + * Common initialization registers for all currently + * supported displays. Mostly seem to be related + * to Gamma correction curves and output pad mappings. 
+ */ +static const struct nv3052c_reg common_init_regs[] = { // EXTC Command set enable, select page 2 { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 }, // Set gray scale voltage to adjust gamma @@ -215,7 +168,7 @@ static const struct nv3052c_reg ltk035c5444t_panel_regs[] = { { 0xa0, 0x01 }, // PANELU2D33 // EXTC Command set enable, select page 2 { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 }, - // Unknown registers + // Page 2 register values (0x01..0x10) are same for nv3051d and nv3052c { 0x01, 0x01 }, { 0x02, 0xda }, { 0x03, 0xba }, @@ -236,6 +189,62 @@ static const struct nv3052c_reg ltk035c5444t_panel_regs[] = { { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 }, // Display Access Control { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0 + +}; + +static const struct nv3052c_reg ltk035c5444t_panel_regs[] = { + // EXTC Command set enable, select page 1 + { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x01 }, + // Mostly unknown registers + { 0xe3, 0x00 }, + { 0x40, 0x00 }, + { 0x03, 0x40 }, + { 0x04, 0x00 }, + { 0x05, 0x03 }, + { 0x08, 0x00 }, + { 0x09, 0x07 }, + { 0x0a, 0x01 }, + { 0x0b, 0x32 }, + { 0x0c, 0x32 }, + { 0x0d, 0x0b }, + { 0x0e, 0x00 }, + { 0x23, 0xa0 }, + { 0x24, 0x0c }, + { 0x25, 0x06 }, + { 0x26, 0x14 }, + { 0x27, 0x14 }, + { 0x38, 0xcc }, // VCOM_ADJ1 + { 0x39, 0xd7 }, // VCOM_ADJ2 + { 0x3a, 0x4a }, // VCOM_ADJ3 + { 0x28, 0x40 }, + { 0x29, 0x01 }, + { 0x2a, 0xdf }, + { 0x49, 0x3c }, + { 0x91, 0x77 }, // EXTPW_CTRL2 + { 0x92, 0x77 }, // EXTPW_CTRL3 + { 0xa0, 0x55 }, + { 0xa1, 0x50 }, + { 0xa4, 0x9c }, + { 0xa7, 0x02 }, + { 0xa8, 0x01 }, + { 0xa9, 0x01 }, + { 0xaa, 0xfc }, + { 0xab, 0x28 }, + { 0xac, 0x06 }, + { 0xad, 0x06 }, + { 0xae, 0x06 }, + { 0xaf, 0x03 }, + { 0xb0, 0x08 }, + { 0xb1, 0x26 }, + { 0xb2, 0x28 }, + { 0xb3, 0x28 }, + { 0xb4, 0x33 }, + { 0xb5, 0x08 }, + { 0xb6, 0x26 }, + { 0xb7, 0x08 }, + { 0xb8, 0x26 }, + { 0xf0, 0x00 }, + { 0xf6, 0xc0 }, }; static const struct nv3052c_reg fs035vg158_panel_regs[] = { @@ -291,146 +300,6 @@ static const struct nv3052c_reg fs035vg158_panel_regs[] = { { 0xb8, 0x26 }, { 0xf0, 0x00 }, { 0xf6, 0xc0 }, - // EXTC Command set enable, select page 0 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 }, - // Set gray scale voltage to adjust gamma - { 0xb0, 0x0b }, // PGAMVR0 - { 0xb1, 0x16 }, // PGAMVR1 - { 0xb2, 0x17 }, // PGAMVR2 - { 0xb3, 0x2c }, // PGAMVR3 - { 0xb4, 0x32 }, // PGAMVR4 - { 0xb5, 0x3b }, // PGAMVR5 - { 0xb6, 0x29 }, // PGAMPR0 - { 0xb7, 0x40 }, // PGAMPR1 - { 0xb8, 0x0d }, // PGAMPK0 - { 0xb9, 0x05 }, // PGAMPK1 - { 0xba, 0x12 }, // PGAMPK2 - { 0xbb, 0x10 }, // PGAMPK3 - { 0xbc, 0x12 }, // PGAMPK4 - { 0xbd, 0x15 }, // PGAMPK5 - { 0xbe, 0x19 }, // PGAMPK6 - { 0xbf, 0x0e }, // PGAMPK7 - { 0xc0, 0x16 }, // PGAMPK8 - { 0xc1, 0x0a }, // PGAMPK9 - // Set gray scale voltage to adjust gamma - { 0xd0, 0x0c }, // NGAMVR0 - { 0xd1, 0x17 }, // NGAMVR0 - { 0xd2, 0x14 }, // NGAMVR1 - { 0xd3, 0x2e }, // NGAMVR2 - { 0xd4, 0x32 }, // NGAMVR3 - { 0xd5, 0x3c }, // NGAMVR4 - { 0xd6, 0x22 }, // NGAMPR0 - { 0xd7, 0x3d }, // NGAMPR1 - { 0xd8, 0x0d }, // NGAMPK0 - { 0xd9, 0x07 }, // NGAMPK1 - { 0xda, 0x13 }, // NGAMPK2 - { 0xdb, 0x13 }, // NGAMPK3 - { 0xdc, 0x11 }, // NGAMPK4 - { 0xdd, 0x15 }, // NGAMPK5 - { 0xde, 0x19 }, // NGAMPK6 - { 0xdf, 0x10 }, // NGAMPK7 - { 0xe0, 0x17 }, // NGAMPK8 - { 0xe1, 0x0a }, // NGAMPK9 - // EXTC Command set enable, select page 3 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x03 }, - // Set various timing settings - { 0x00, 0x2a }, // GIP_VST_1 - { 0x01, 0x2a }, // GIP_VST_2 - { 0x02, 0x2a }, // GIP_VST_3 - { 0x03, 0x2a }, // 
GIP_VST_4 - { 0x04, 0x61 }, // GIP_VST_5 - { 0x05, 0x80 }, // GIP_VST_6 - { 0x06, 0xc7 }, // GIP_VST_7 - { 0x07, 0x01 }, // GIP_VST_8 - { 0x08, 0x03 }, // GIP_VST_9 - { 0x09, 0x04 }, // GIP_VST_10 - { 0x70, 0x22 }, // GIP_ECLK1 - { 0x71, 0x80 }, // GIP_ECLK2 - { 0x30, 0x2a }, // GIP_CLK_1 - { 0x31, 0x2a }, // GIP_CLK_2 - { 0x32, 0x2a }, // GIP_CLK_3 - { 0x33, 0x2a }, // GIP_CLK_4 - { 0x34, 0x61 }, // GIP_CLK_5 - { 0x35, 0xc5 }, // GIP_CLK_6 - { 0x36, 0x80 }, // GIP_CLK_7 - { 0x37, 0x23 }, // GIP_CLK_8 - { 0x40, 0x03 }, // GIP_CLKA_1 - { 0x41, 0x04 }, // GIP_CLKA_2 - { 0x42, 0x05 }, // GIP_CLKA_3 - { 0x43, 0x06 }, // GIP_CLKA_4 - { 0x44, 0x11 }, // GIP_CLKA_5 - { 0x45, 0xe8 }, // GIP_CLKA_6 - { 0x46, 0xe9 }, // GIP_CLKA_7 - { 0x47, 0x11 }, // GIP_CLKA_8 - { 0x48, 0xea }, // GIP_CLKA_9 - { 0x49, 0xeb }, // GIP_CLKA_10 - { 0x50, 0x07 }, // GIP_CLKB_1 - { 0x51, 0x08 }, // GIP_CLKB_2 - { 0x52, 0x09 }, // GIP_CLKB_3 - { 0x53, 0x0a }, // GIP_CLKB_4 - { 0x54, 0x11 }, // GIP_CLKB_5 - { 0x55, 0xec }, // GIP_CLKB_6 - { 0x56, 0xed }, // GIP_CLKB_7 - { 0x57, 0x11 }, // GIP_CLKB_8 - { 0x58, 0xef }, // GIP_CLKB_9 - { 0x59, 0xf0 }, // GIP_CLKB_10 - // Map internal GOA signals to GOA output pad - { 0xb1, 0x01 }, // PANELD2U2 - { 0xb4, 0x15 }, // PANELD2U5 - { 0xb5, 0x16 }, // PANELD2U6 - { 0xb6, 0x09 }, // PANELD2U7 - { 0xb7, 0x0f }, // PANELD2U8 - { 0xb8, 0x0d }, // PANELD2U9 - { 0xb9, 0x0b }, // PANELD2U10 - { 0xba, 0x00 }, // PANELD2U11 - { 0xc7, 0x02 }, // PANELD2U24 - { 0xca, 0x17 }, // PANELD2U27 - { 0xcb, 0x18 }, // PANELD2U28 - { 0xcc, 0x0a }, // PANELD2U29 - { 0xcd, 0x10 }, // PANELD2U30 - { 0xce, 0x0e }, // PANELD2U31 - { 0xcf, 0x0c }, // PANELD2U32 - { 0xd0, 0x00 }, // PANELD2U33 - // Map internal GOA signals to GOA output pad - { 0x81, 0x00 }, // PANELU2D2 - { 0x84, 0x15 }, // PANELU2D5 - { 0x85, 0x16 }, // PANELU2D6 - { 0x86, 0x10 }, // PANELU2D7 - { 0x87, 0x0a }, // PANELU2D8 - { 0x88, 0x0c }, // PANELU2D9 - { 0x89, 0x0e }, // PANELU2D10 - { 0x8a, 0x02 }, // PANELU2D11 - { 0x97, 0x00 }, // PANELU2D24 - { 0x9a, 0x17 }, // PANELU2D27 - { 0x9b, 0x18 }, // PANELU2D28 - { 0x9c, 0x0f }, // PANELU2D29 - { 0x9d, 0x09 }, // PANELU2D30 - { 0x9e, 0x0b }, // PANELU2D31 - { 0x9f, 0x0d }, // PANELU2D32 - { 0xa0, 0x01 }, // PANELU2D33 - // EXTC Command set enable, select page 2 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 }, - // Unknown registers - { 0x01, 0x01 }, - { 0x02, 0xda }, - { 0x03, 0xba }, - { 0x04, 0xa8 }, - { 0x05, 0x9a }, - { 0x06, 0x70 }, - { 0x07, 0xff }, - { 0x08, 0x91 }, - { 0x09, 0x90 }, - { 0x0a, 0xff }, - { 0x0b, 0x8f }, - { 0x0c, 0x60 }, - { 0x0d, 0x58 }, - { 0x0e, 0x48 }, - { 0x0f, 0x38 }, - { 0x10, 0x2b }, - // EXTC Command set enable, select page 0 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 }, - // Display Access Control - { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0 }; @@ -487,146 +356,6 @@ static const struct nv3052c_reg wl_355608_a8_panel_regs[] = { { 0xb8, 0x26 }, { 0xf0, 0x00 }, { 0xf6, 0xc0 }, - // EXTC Command set enable, select page 2 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 }, - // Set gray scale voltage to adjust gamma - { 0xb0, 0x0b }, // PGAMVR0 - { 0xb1, 0x16 }, // PGAMVR1 - { 0xb2, 0x17 }, // PGAMVR2 - { 0xb3, 0x2c }, // PGAMVR3 - { 0xb4, 0x32 }, // PGAMVR4 - { 0xb5, 0x3b }, // PGAMVR5 - { 0xb6, 0x29 }, // PGAMPR0 - { 0xb7, 0x40 }, // PGAMPR1 - { 0xb8, 0x0d }, // PGAMPK0 - { 0xb9, 0x05 }, // PGAMPK1 - { 0xba, 0x12 }, // PGAMPK2 - { 0xbb, 0x10 }, // PGAMPK3 - { 0xbc, 0x12 }, // PGAMPK4 - { 0xbd, 0x15 }, // PGAMPK5 - { 0xbe, 0x19 }, // PGAMPK6 - { 0xbf, 0x0e }, 
// PGAMPK7 - { 0xc0, 0x16 }, // PGAMPK8 - { 0xc1, 0x0a }, // PGAMPK9 - // Set gray scale voltage to adjust gamma - { 0xd0, 0x0c }, // NGAMVR0 - { 0xd1, 0x17 }, // NGAMVR0 - { 0xd2, 0x14 }, // NGAMVR1 - { 0xd3, 0x2e }, // NGAMVR2 - { 0xd4, 0x32 }, // NGAMVR3 - { 0xd5, 0x3c }, // NGAMVR4 - { 0xd6, 0x22 }, // NGAMPR0 - { 0xd7, 0x3d }, // NGAMPR1 - { 0xd8, 0x0d }, // NGAMPK0 - { 0xd9, 0x07 }, // NGAMPK1 - { 0xda, 0x13 }, // NGAMPK2 - { 0xdb, 0x13 }, // NGAMPK3 - { 0xdc, 0x11 }, // NGAMPK4 - { 0xdd, 0x15 }, // NGAMPK5 - { 0xde, 0x19 }, // NGAMPK6 - { 0xdf, 0x10 }, // NGAMPK7 - { 0xe0, 0x17 }, // NGAMPK8 - { 0xe1, 0x0a }, // NGAMPK9 - // EXTC Command set enable, select page 3 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x03 }, - // Set various timing settings - { 0x00, 0x2a }, // GIP_VST_1 - { 0x01, 0x2a }, // GIP_VST_2 - { 0x02, 0x2a }, // GIP_VST_3 - { 0x03, 0x2a }, // GIP_VST_4 - { 0x04, 0x61 }, // GIP_VST_5 - { 0x05, 0x80 }, // GIP_VST_6 - { 0x06, 0xc7 }, // GIP_VST_7 - { 0x07, 0x01 }, // GIP_VST_8 - { 0x08, 0x03 }, // GIP_VST_9 - { 0x09, 0x04 }, // GIP_VST_10 - { 0x70, 0x22 }, // GIP_ECLK1 - { 0x71, 0x80 }, // GIP_ECLK2 - { 0x30, 0x2a }, // GIP_CLK_1 - { 0x31, 0x2a }, // GIP_CLK_2 - { 0x32, 0x2a }, // GIP_CLK_3 - { 0x33, 0x2a }, // GIP_CLK_4 - { 0x34, 0x61 }, // GIP_CLK_5 - { 0x35, 0xc5 }, // GIP_CLK_6 - { 0x36, 0x80 }, // GIP_CLK_7 - { 0x37, 0x23 }, // GIP_CLK_8 - { 0x40, 0x03 }, // GIP_CLKA_1 - { 0x41, 0x04 }, // GIP_CLKA_2 - { 0x42, 0x05 }, // GIP_CLKA_3 - { 0x43, 0x06 }, // GIP_CLKA_4 - { 0x44, 0x11 }, // GIP_CLKA_5 - { 0x45, 0xe8 }, // GIP_CLKA_6 - { 0x46, 0xe9 }, // GIP_CLKA_7 - { 0x47, 0x11 }, // GIP_CLKA_8 - { 0x48, 0xea }, // GIP_CLKA_9 - { 0x49, 0xeb }, // GIP_CLKA_10 - { 0x50, 0x07 }, // GIP_CLKB_1 - { 0x51, 0x08 }, // GIP_CLKB_2 - { 0x52, 0x09 }, // GIP_CLKB_3 - { 0x53, 0x0a }, // GIP_CLKB_4 - { 0x54, 0x11 }, // GIP_CLKB_5 - { 0x55, 0xec }, // GIP_CLKB_6 - { 0x56, 0xed }, // GIP_CLKB_7 - { 0x57, 0x11 }, // GIP_CLKB_8 - { 0x58, 0xef }, // GIP_CLKB_9 - { 0x59, 0xf0 }, // GIP_CLKB_10 - // Map internal GOA signals to GOA output pad - { 0xb1, 0x01 }, // PANELD2U2 - { 0xb4, 0x15 }, // PANELD2U5 - { 0xb5, 0x16 }, // PANELD2U6 - { 0xb6, 0x09 }, // PANELD2U7 - { 0xb7, 0x0f }, // PANELD2U8 - { 0xb8, 0x0d }, // PANELD2U9 - { 0xb9, 0x0b }, // PANELD2U10 - { 0xba, 0x00 }, // PANELD2U11 - { 0xc7, 0x02 }, // PANELD2U24 - { 0xca, 0x17 }, // PANELD2U27 - { 0xcb, 0x18 }, // PANELD2U28 - { 0xcc, 0x0a }, // PANELD2U29 - { 0xcd, 0x10 }, // PANELD2U30 - { 0xce, 0x0e }, // PANELD2U31 - { 0xcf, 0x0c }, // PANELD2U32 - { 0xd0, 0x00 }, // PANELD2U33 - // Map internal GOA signals to GOA output pad - { 0x81, 0x00 }, // PANELU2D2 - { 0x84, 0x15 }, // PANELU2D5 - { 0x85, 0x16 }, // PANELU2D6 - { 0x86, 0x10 }, // PANELU2D7 - { 0x87, 0x0a }, // PANELU2D8 - { 0x88, 0x0c }, // PANELU2D9 - { 0x89, 0x0e }, // PANELU2D10 - { 0x8a, 0x02 }, // PANELU2D11 - { 0x97, 0x00 }, // PANELU2D24 - { 0x9a, 0x17 }, // PANELU2D27 - { 0x9b, 0x18 }, // PANELU2D28 - { 0x9c, 0x0f }, // PANELU2D29 - { 0x9d, 0x09 }, // PANELU2D30 - { 0x9e, 0x0b }, // PANELU2D31 - { 0x9f, 0x0d }, // PANELU2D32 - { 0xa0, 0x01 }, // PANELU2D33 - // EXTC Command set enable, select page 2 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 }, - // Unknown registers - { 0x01, 0x01 }, - { 0x02, 0xda }, - { 0x03, 0xba }, - { 0x04, 0xa8 }, - { 0x05, 0x9a }, - { 0x06, 0x70 }, - { 0x07, 0xff }, - { 0x08, 0x91 }, - { 0x09, 0x90 }, - { 0x0a, 0xff }, - { 0x0b, 0x8f }, - { 0x0c, 0x60 }, - { 0x0d, 0x58 }, - { 0x0e, 0x48 }, - { 0x0f, 0x38 }, - { 0x10, 0x2b }, - // EXTC 
Command set enable, select page 0 - { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 }, - // Display Access Control - { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0 }; static inline struct nv3052c *to_nv3052c(struct drm_panel *panel) @@ -655,6 +384,7 @@ static int nv3052c_prepare(struct drm_panel *panel) gpiod_set_value_cansleep(priv->reset_gpio, 0); usleep_range(5000, 20000); + /* Apply panel-specific initialization registers */ for (i = 0; i < panel_regs_len; i++) { err = mipi_dbi_command(dbi, panel_regs[i].cmd, panel_regs[i].val); @@ -665,6 +395,16 @@ static int nv3052c_prepare(struct drm_panel *panel) } } + /* Apply common initialization registers */ + for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) { + err = mipi_dbi_command(dbi, common_init_regs[i].cmd, + common_init_regs[i].val); + if (err) { + dev_err(priv->dev, "Unable to set register: %d\n", err); + goto err_disable_regulator; + } + } + err = mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); if (err) { dev_err(priv->dev, "Unable to exit sleep mode: %d\n", err); diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c index ad35d0fb0a16..c3fbc459c7e0 100644 --- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -54,9 +54,9 @@ static int rb070d30_panel_prepare(struct drm_panel *panel) } msleep(20); - gpiod_set_value(ctx->gpios.power, 1); + gpiod_set_value_cansleep(ctx->gpios.power, 1); msleep(20); - gpiod_set_value(ctx->gpios.reset, 1); + gpiod_set_value_cansleep(ctx->gpios.reset, 1); msleep(20); return 0; } @@ -65,8 +65,8 @@ static int rb070d30_panel_unprepare(struct drm_panel *panel) { struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); - gpiod_set_value(ctx->gpios.reset, 0); - gpiod_set_value(ctx->gpios.power, 0); + gpiod_set_value_cansleep(ctx->gpios.reset, 0); + gpiod_set_value_cansleep(ctx->gpios.power, 0); regulator_disable(ctx->supply); return 0; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c new file mode 100644 index 000000000000..e63080204af7 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Nia Espera <a5b6@riseup.net> + * Copyright (c) 2025 David Heidelberg <david@ixit.cz> + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> +#include <linux/swab.h> +#include <linux/backlight.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +#define MCS_ELVSS_ON 0xb1 + +struct samsung_s6e3fc2x01 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data *supplies; + struct gpio_desc *reset_gpio; +}; + +static const struct regulator_bulk_data s6e3fc2x01_supplies[] = { + { .supply = "vddio" }, + { .supply = "vci" }, + { .supply = "poc" }, +}; + +static inline +struct samsung_s6e3fc2x01 *to_samsung_s6e3fc2x01(struct drm_panel *panel) +{ + return container_of(panel, struct samsung_s6e3fc2x01, panel); +} + +#define s6e3fc2x01_test_key_on_lvl1(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0xa5, 0xa5) +#define s6e3fc2x01_test_key_off_lvl1(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0x5a, 0x5a) +#define s6e3fc2x01_test_key_on_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a) 
+#define s6e3fc2x01_test_key_off_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5) +#define s6e3fc2x01_test_key_on_lvl3(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0x5a, 0x5a) +#define s6e3fc2x01_test_key_off_lvl3(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0xa5, 0xa5) + +static void s6e3fc2x01_reset(struct samsung_s6e3fc2x01 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(5000, 6000); +} + +static int s6e3fc2x01_on(struct samsung_s6e3fc2x01 *ctx) +{ + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; + + s6e3fc2x01_test_key_on_lvl1(&dsi_ctx); + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x0a); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + + s6e3fc2x01_test_key_off_lvl1(&dsi_ctx); + + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x01); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + + mipi_dsi_usleep_range(&dsi_ctx, 15000, 16000); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x0f); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + + s6e3fc2x01_test_key_on_lvl1(&dsi_ctx); + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + s6e3fc2x01_test_key_off_lvl1(&dsi_ctx); + + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xeb, 0x17, + 0x41, 0x92, + 0x0e, 0x10, + 0x82, 0x5a); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + + /* Column & Page Address Setting */ + mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x0437); + mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923); + + /* Horizontal & Vertical sync Setting */ + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x09); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe8, 0x10, 0x30); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + + s6e3fc2x01_test_key_on_lvl3(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x88); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xed, 0x67); + s6e3fc2x01_test_key_off_lvl3(&dsi_ctx); + + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x12); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000); + + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ELVSS_ON, 0x00, 0x01); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x00, 0xc1); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x78); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x90); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ELVSS_ON, 0xc6, 0x00, 0x00, + 0x21, 0xed, 0x02, 0x08, 
0x06, 0xc1, 0x27, + 0xfc, 0xdc, 0xe4, 0x00, 0xd9, 0xe6, 0xe7, + 0x00, 0xfc, 0xff, 0xea); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ELVSS_ON, 0x00, 0x00); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + + return dsi_ctx.accum_err; +} + +static int s6e3fc2x01_enable(struct drm_panel *panel) +{ + struct samsung_s6e3fc2x01 *ctx = to_samsung_s6e3fc2x01(panel); + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; + + s6e3fc2x01_test_key_on_lvl1(&dsi_ctx); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + s6e3fc2x01_test_key_off_lvl1(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static int s6e3fc2x01_off(struct samsung_s6e3fc2x01 *ctx) +{ + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; + + s6e3fc2x01_test_key_on_lvl1(&dsi_ctx); + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_usleep_range(&dsi_ctx, 16000, 17000); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x50); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x82); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + mipi_dsi_usleep_range(&dsi_ctx, 16000, 17000); + + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + + s6e3fc2x01_test_key_off_lvl1(&dsi_ctx); + + s6e3fc2x01_test_key_on_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x01); + s6e3fc2x01_test_key_off_lvl2(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 160); + + return dsi_ctx.accum_err; +} + +static int s6e3fc2x01_disable(struct drm_panel *panel) +{ + struct samsung_s6e3fc2x01 *ctx = to_samsung_s6e3fc2x01(panel); + + s6e3fc2x01_off(ctx); + + return 0; +} + +static int s6e3fc2x01_prepare(struct drm_panel *panel) +{ + struct samsung_s6e3fc2x01 *ctx = to_samsung_s6e3fc2x01(panel); + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(s6e3fc2x01_supplies), ctx->supplies); + if (ret < 0) + return ret; + + s6e3fc2x01_reset(ctx); + + ret = s6e3fc2x01_on(ctx); + if (ret < 0) { + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(s6e3fc2x01_supplies), ctx->supplies); + return ret; + } + + return 0; +} + +static int s6e3fc2x01_unprepare(struct drm_panel *panel) +{ + struct samsung_s6e3fc2x01 *ctx = to_samsung_s6e3fc2x01(panel); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(s6e3fc2x01_supplies), ctx->supplies); + + return 0; +} + +static const struct drm_display_mode ams641rw_mode = { + .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 72, + .hsync_end = 1080 + 72 + 16, + .htotal = 1080 + 72 + 16 + 36, + .vdisplay = 2340, + .vsync_start = 2340 + 32, + .vsync_end = 2340 + 32 + 4, + .vtotal = 2340 + 32 + 4 + 18, + .width_mm = 68, + .height_mm = 145, +}; + +static int s6e3fc2x01_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &ams641rw_mode); +} + +static const struct drm_panel_funcs samsung_s6e3fc2x01_panel_funcs = { + .prepare = s6e3fc2x01_prepare, + .enable = s6e3fc2x01_enable, + .disable = s6e3fc2x01_disable, + .unprepare = s6e3fc2x01_unprepare, + .get_modes = s6e3fc2x01_get_modes, +}; + +static int s6e3fc2x01_panel_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int err; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + err = 
mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (err < 0) + return err; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static const struct backlight_ops s6e3fc2x01_panel_bl_ops = { + .update_status = s6e3fc2x01_panel_bl_update_status, +}; + +static struct backlight_device * +s6e3fc2x01_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_PLATFORM, + .brightness = 512, + .max_brightness = 1023, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &s6e3fc2x01_panel_bl_ops, &props); +} + +static int s6e3fc2x01_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct samsung_s6e3fc2x01 *ctx; + int ret; + + ctx = devm_drm_panel_alloc(dev, struct samsung_s6e3fc2x01, panel, + &samsung_s6e3fc2x01_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ret = devm_regulator_bulk_get_const(dev, + ARRAY_SIZE(s6e3fc2x01_supplies), + s6e3fc2x01_supplies, + &ctx->supplies); + if (ret) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + + /* keep the display on for flicker-free experience */ + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = s6e3fc2x01_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static void s6e3fc2x01_remove(struct mipi_dsi_device *dsi) +{ + struct samsung_s6e3fc2x01 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id s6e3fc2x01_of_match[] = { + { .compatible = "samsung,s6e3fc2x01-ams641rw", .data = &ams641rw_mode }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s6e3fc2x01_of_match); + +static struct mipi_dsi_driver s6e3fc2x01_driver = { + .probe = s6e3fc2x01_probe, + .remove = s6e3fc2x01_remove, + .driver = { + .name = "panel-samsung-s6e3fc2x01", + .of_match_table = s6e3fc2x01_of_match, + }, +}; +module_mipi_dsi_driver(s6e3fc2x01_driver); + +MODULE_AUTHOR("David Heidelberg <david@ixit.cz>"); +MODULE_DESCRIPTION("DRM driver for Samsung S6E3FC2X01 DDIC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c index 064258217d50..e00a497a7c96 100644 --- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -16,20 +16,32 @@ #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> struct sofef00_panel { struct drm_panel panel; struct mipi_dsi_device *dsi; - struct regulator *supply; + struct regulator_bulk_data *supplies; struct gpio_desc *reset_gpio; }; +static const struct 
regulator_bulk_data sofef00_supplies[] = { + { .supply = "vddio" }, + { .supply = "vci" }, + { .supply = "poc" }, +}; + static inline struct sofef00_panel *to_sofef00_panel(struct drm_panel *panel) { return container_of(panel, struct sofef00_panel, panel); } +#define sofef00_test_key_on_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a) +#define sofef00_test_key_off_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5) + static void sofef00_panel_reset(struct sofef00_panel *ctx) { gpiod_set_value_cansleep(ctx->reset_gpio, 0); @@ -50,18 +62,26 @@ static int sofef00_panel_on(struct sofef00_panel *ctx) mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); - + sofef00_test_key_on_lvl2(&dsi_ctx); mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + sofef00_test_key_off_lvl2(&dsi_ctx); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); + sofef00_test_key_on_lvl2(&dsi_ctx); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); + sofef00_test_key_off_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + return dsi_ctx.accum_err; +} + +static int sofef00_enable(struct drm_panel *panel) +{ + struct sofef00_panel *ctx = to_sofef00_panel(panel); + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); return dsi_ctx.accum_err; @@ -72,8 +92,6 @@ static int sofef00_panel_off(struct sofef00_panel *ctx) struct mipi_dsi_device *dsi = ctx->dsi; struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); mipi_dsi_msleep(&dsi_ctx, 40); @@ -86,70 +104,70 @@ static int sofef00_panel_off(struct sofef00_panel *ctx) static int sofef00_panel_prepare(struct drm_panel *panel) { struct sofef00_panel *ctx = to_sofef00_panel(panel); - struct device *dev = &ctx->dsi->dev; int ret; - ret = regulator_enable(ctx->supply); - if (ret < 0) { - dev_err(dev, "Failed to enable regulator: %d\n", ret); + ret = regulator_bulk_enable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); + if (ret < 0) return ret; - } sofef00_panel_reset(ctx); ret = sofef00_panel_on(ctx); if (ret < 0) { gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); return ret; } return 0; } -static int sofef00_panel_unprepare(struct drm_panel *panel) +static int sofef00_disable(struct drm_panel *panel) { struct sofef00_panel *ctx = to_sofef00_panel(panel); sofef00_panel_off(ctx); - regulator_disable(ctx->supply); return 0; } -static const struct drm_display_mode enchilada_panel_mode = { +static int sofef00_panel_unprepare(struct drm_panel *panel) +{ + struct sofef00_panel *ctx = to_sofef00_panel(panel); + + regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); + + return 0; +} + +static const struct drm_display_mode ams628nw01_panel_mode = { .clock = (1080 + 112 + 16 + 36) * (2280 + 36 + 8 + 12) * 60 / 1000, + .hdisplay = 1080, .hsync_start = 1080 + 112, .hsync_end = 1080 + 112 + 16, .htotal = 1080 + 112 + 16 + 36, + .vdisplay = 2280, .vsync_start = 2280 + 36, .vsync_end = 2280 + 36 + 8, .vtotal = 2280 + 36 + 8 + 12, + 
.width_mm = 68, .height_mm = 145, + + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }; static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode); - if (!mode) - return -ENOMEM; - - drm_mode_set_name(mode); - - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - drm_mode_probed_add(connector, mode); - - return 1; + return drm_connector_helper_get_modes_fixed(connector, &ams628nw01_panel_mode); } static const struct drm_panel_funcs sofef00_panel_panel_funcs = { .prepare = sofef00_panel_prepare, + .enable = sofef00_enable, + .disable = sofef00_disable, .unprepare = sofef00_panel_unprepare, .get_modes = sofef00_panel_get_modes, }; @@ -160,10 +178,14 @@ static int sofef00_panel_bl_update_status(struct backlight_device *bl) int err; u16 brightness = (u16)backlight_get_brightness(bl); + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + err = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); if (err < 0) return err; + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + return 0; } @@ -177,7 +199,7 @@ sofef00_create_backlight(struct mipi_dsi_device *dsi) struct device *dev = &dsi->dev; const struct backlight_properties props = { .type = BACKLIGHT_PLATFORM, - .brightness = 1023, + .brightness = 512, .max_brightness = 1023, }; @@ -197,10 +219,12 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) if (IS_ERR(ctx)) return PTR_ERR(ctx); - ctx->supply = devm_regulator_get(dev, "vddio"); - if (IS_ERR(ctx->supply)) - return dev_err_probe(dev, PTR_ERR(ctx->supply), - "Failed to get vddio regulator\n"); + ret = devm_regulator_bulk_get_const(dev, + ARRAY_SIZE(sofef00_supplies), + sofef00_supplies, + &ctx->supplies); + if (ret) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ctx->reset_gpio)) @@ -212,6 +236,10 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + ctx->panel.prepare_prev_first = true; ctx->panel.backlight = sofef00_create_backlight(dsi); if (IS_ERR(ctx->panel.backlight)) @@ -243,7 +271,8 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi) } static const struct of_device_id sofef00_panel_of_match[] = { - { .compatible = "samsung,sofef00" }, + { .compatible = "samsung,sofef00" }, /* legacy */ + { .compatible = "samsung,sofef00-ams628nw01" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sofef00_panel_of_match); @@ -252,7 +281,7 @@ static struct mipi_dsi_driver sofef00_panel_driver = { .probe = sofef00_panel_probe, .remove = sofef00_panel_remove, .driver = { - .name = "panel-oneplus6", + .name = "panel-samsung-sofef00", .of_match_table = sofef00_panel_of_match, }, }; @@ -260,5 +289,5 @@ static struct mipi_dsi_driver sofef00_panel_driver = { module_mipi_dsi_driver(sofef00_panel_driver); MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); -MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones"); +MODULE_DESCRIPTION("DRM driver for Samsung SOFEF00 DDIC"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c new file mode 100644 index 
000000000000..8c00fde1c4a9 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016 XiaoMi, Inc. + * Copyright (c) 2024 Svyatoslav Ryhel <clamor95@gmail.com> + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +static const struct regulator_bulk_data sharp_supplies[] = { + { .supply = "avdd" }, { .supply = "vddio" }, + { .supply = "vsp" }, { .supply = "vsn" }, +}; + +struct sharp_panel { + struct drm_panel panel; + struct mipi_dsi_device *dsi[2]; + + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; + + const struct drm_display_mode *mode; +}; + +static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel) +{ + return container_of(panel, struct sharp_panel, panel); +} + +static void sharp_panel_reset(struct sharp_panel *sharp) +{ + gpiod_set_value_cansleep(sharp->reset_gpio, 1); + usleep_range(2000, 3000); + gpiod_set_value_cansleep(sharp->reset_gpio, 0); + usleep_range(2000, 3000); +} + +static int sharp_panel_prepare(struct drm_panel *panel) +{ + struct sharp_panel *sharp = to_sharp_panel(panel); + struct device *dev = panel->dev; + struct mipi_dsi_device *dsi0 = sharp->dsi[0]; + struct mipi_dsi_device *dsi1 = sharp->dsi[1]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(sharp_supplies), sharp->supplies); + if (ret) { + dev_err(dev, "error enabling regulators (%d)\n", ret); + return ret; + } + + msleep(24); + + if (sharp->reset_gpio) + sharp_panel_reset(sharp); + + msleep(32); + + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_EXIT_SLEEP_MODE); + mipi_dsi_msleep(&dsi_ctx, 120); + + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, + MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, + MIPI_DCS_WRITE_POWER_SAVE, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, + MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c); + + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_SET_DISPLAY_ON); + + return 0; +} + +static int sharp_panel_unprepare(struct drm_panel *panel) +{ + struct sharp_panel *sharp = to_sharp_panel(panel); + struct mipi_dsi_device *dsi0 = sharp->dsi[0]; + struct mipi_dsi_device *dsi1 = sharp->dsi[1]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; + + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_SET_DISPLAY_OFF); + mipi_dsi_msleep(&dsi_ctx, 100); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_ENTER_SLEEP_MODE); + mipi_dsi_msleep(&dsi_ctx, 150); + + if (sharp->reset_gpio) + gpiod_set_value_cansleep(sharp->reset_gpio, 1); + + return regulator_bulk_disable(ARRAY_SIZE(sharp_supplies), sharp->supplies); +} + +static const struct drm_display_mode default_mode = { + .clock = (1536 + 136 + 28 + 28) * (2048 + 14 + 8 + 2) * 60 / 1000, + .hdisplay = 1536, + .hsync_start = 1536 + 136, + .hsync_end = 1536 + 136 + 28, + .htotal = 1536 + 136 + 28 + 28, + .vdisplay = 2048, + .vsync_start = 2048 + 14, + .vsync_end = 2048 + 14 + 8, + .vtotal = 2048 + 14 + 8 + 2, + .width_mm = 120, + .height_mm 
= 160, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static int sharp_panel_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &default_mode); +} + +static const struct drm_panel_funcs sharp_panel_funcs = { + .unprepare = sharp_panel_unprepare, + .prepare = sharp_panel_prepare, + .get_modes = sharp_panel_get_modes, +}; + +static int sharp_panel_probe(struct mipi_dsi_device *dsi) +{ + const struct mipi_dsi_device_info info = { "sharp-link1", 0, NULL }; + struct device *dev = &dsi->dev; + struct device_node *dsi_r; + struct mipi_dsi_host *dsi_r_host; + struct sharp_panel *sharp; + int i, ret; + + sharp = devm_drm_panel_alloc(dev, struct sharp_panel, panel, + &sharp_panel_funcs, DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(sharp)) + return PTR_ERR(sharp); + + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(sharp_supplies), + sharp_supplies, &sharp->supplies); + if (ret) + return dev_err_probe(dev, ret, "failed to get supplies\n"); + + sharp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(sharp->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(sharp->reset_gpio), + "failed to get reset GPIO\n"); + + /* Panel is always connected to two DSI hosts, DSI0 is left, DSI1 is right */ + dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1); + if (!dsi_r) + return dev_err_probe(dev, -ENODEV, "failed to find second DSI host node\n"); + + dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r); + of_node_put(dsi_r); + if (!dsi_r_host) + return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n"); + + sharp->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, &info); + if (IS_ERR(sharp->dsi[1])) + return dev_err_probe(dev, PTR_ERR(sharp->dsi[1]), + "second link registration failed\n"); + + sharp->dsi[0] = dsi; + mipi_dsi_set_drvdata(dsi, sharp); + + ret = drm_panel_of_backlight(&sharp->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&sharp->panel); + + for (i = 0; i < ARRAY_SIZE(sharp->dsi); i++) { + if (!sharp->dsi[i]) + continue; + + sharp->dsi[i]->lanes = 4; + sharp->dsi[i]->format = MIPI_DSI_FMT_RGB888; + sharp->dsi[i]->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM; + + ret = devm_mipi_dsi_attach(dev, sharp->dsi[i]); + if (ret < 0) { + drm_panel_remove(&sharp->panel); + return dev_err_probe(dev, ret, "failed to attach to DSI%d\n", i); + } + } + + return 0; +} + +static void sharp_panel_remove(struct mipi_dsi_device *dsi) +{ + struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&sharp->panel); +} + +static const struct of_device_id sharp_of_match[] = { + { .compatible = "sharp,lq079l1sx01" }, + { } +}; +MODULE_DEVICE_TABLE(of, sharp_of_match); + +static struct mipi_dsi_driver sharp_panel_driver = { + .driver = { + .name = "panel-sharp-lq079l1sx01", + .of_match_table = sharp_of_match, + }, + .probe = sharp_panel_probe, + .remove = sharp_panel_remove, +}; +module_mipi_dsi_driver(sharp_panel_driver); + +MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>"); +MODULE_DESCRIPTION("Sharp LQ079L1SX01 panel driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 0019de93be1b..b26b682826bc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2889,6 +2889,38 @@ static const struct panel_desc innolux_zj070na_01p = { }, }; +static const struct display_timing 
jutouch_jt101tm023_timing = { + .pixelclock = { 66300000, 72400000, 78900000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 12, 72, 132 }, + .hback_porch = { 88, 88, 88 }, + .hsync_len = { 10, 10, 48 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 1, 15, 49 }, + .vback_porch = { 23, 23, 23 }, + .vsync_len = { 5, 6, 13 }, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | + DISPLAY_FLAGS_DE_HIGH, +}; + +static const struct panel_desc jutouch_jt101tm023 = { + .timings = &jutouch_jt101tm023_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 217, + .height = 136, + }, + .delay = { + .enable = 50, + .disable = 50, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + + static const struct display_timing koe_tx14d24vm1bpa_timing = { .pixelclock = { 5580000, 5850000, 6200000 }, .hactive = { 320, 320, 320 }, @@ -4074,6 +4106,30 @@ static const struct panel_desc qishenglong_gopher2b_lcd = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct display_timing raystar_rff500f_awh_dnn_timing = { + .pixelclock = { 23000000, 25000000, 27000000 }, + .hactive = { 800, 800, 800 }, + .hback_porch = { 4, 8, 48 }, + .hfront_porch = { 4, 8, 48 }, + .hsync_len = { 2, 4, 8 }, + .vactive = { 480, 480, 480 }, + .vback_porch = { 4, 8, 12 }, + .vfront_porch = { 4, 8, 12 }, + .vsync_len = { 2, 4, 8 }, +}; + +static const struct panel_desc raystar_rff500f_awh_dnn = { + .timings = &raystar_rff500f_awh_dnn_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 108, + .height = 65, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct display_timing rocktech_rk043fn48h_timing = { .pixelclock = { 6000000, 9000000, 12000000 }, .hactive = { 480, 480, 480 }, @@ -4191,6 +4247,37 @@ static const struct panel_desc samsung_ltl101al01 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct display_timing samsung_ltl106al01_timing = { + .pixelclock = { 71980000, 71980000, 71980000 }, + .hactive = { 1366, 1366, 1366 }, + .hfront_porch = { 56, 56, 56 }, + .hback_porch = { 106, 106, 106 }, + .hsync_len = { 14, 14, 14 }, + .vactive = { 768, 768, 768 }, + .vfront_porch = { 3, 3, 3 }, + .vback_porch = { 6, 6, 6 }, + .vsync_len = { 1, 1, 1 }, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, +}; + +static const struct panel_desc samsung_ltl106al01 = { + .timings = &samsung_ltl106al01_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 235, + .height = 132, + }, + .delay = { + .prepare = 5, + .enable = 10, + .disable = 10, + .unprepare = 5, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode samsung_ltn101nt05_mode = { .clock = 54030, .hdisplay = 1024, @@ -5209,6 +5296,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "innolux,zj070na-01p", .data = &innolux_zj070na_01p, }, { + .compatible = "jutouch,jt101tm023", + .data = &jutouch_jt101tm023, + }, { .compatible = "koe,tx14d24vm1bpa", .data = &koe_tx14d24vm1bpa, }, { @@ -5344,6 +5434,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "qishenglong,gopher2b-lcd", .data = &qishenglong_gopher2b_lcd, }, { + .compatible = "raystar,rff500f-awh-dnn", + .data = &raystar_rff500f_awh_dnn, + }, { .compatible = "rocktech,rk043fn48h", .data = &rocktech_rk043fn48h, }, { @@ -5356,6 +5449,9 @@ static const struct 
of_device_id platform_of_match[] = { .compatible = "samsung,ltl101al01", .data = &samsung_ltl101al01, }, { + .compatible = "samsung,ltl106al01", + .data = &samsung_ltl106al01, + }, { .compatible = "samsung,ltn101nt05", .data = &samsung_ltn101nt05, }, { @@ -5565,34 +5661,6 @@ static const struct panel_desc_dsi boe_tv080wum_nl0 = { .lanes = 4, }; -static const struct drm_display_mode lg_ld070wx3_sl01_mode = { - .clock = 71000, - .hdisplay = 800, - .hsync_start = 800 + 32, - .hsync_end = 800 + 32 + 1, - .htotal = 800 + 32 + 1 + 57, - .vdisplay = 1280, - .vsync_start = 1280 + 28, - .vsync_end = 1280 + 28 + 1, - .vtotal = 1280 + 28 + 1 + 14, -}; - -static const struct panel_desc_dsi lg_ld070wx3_sl01 = { - .desc = { - .modes = &lg_ld070wx3_sl01_mode, - .num_modes = 1, - .bpc = 8, - .size = { - .width = 94, - .height = 151, - }, - .connector_type = DRM_MODE_CONNECTOR_DSI, - }, - .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS, - .format = MIPI_DSI_FMT_RGB888, - .lanes = 4, -}; - static const struct drm_display_mode lg_lh500wx1_sd03_mode = { .clock = 67000, .hdisplay = 720, @@ -5717,9 +5785,6 @@ static const struct of_device_id dsi_of_match[] = { .compatible = "boe,tv080wum-nl0", .data = &boe_tv080wum_nl0 }, { - .compatible = "lg,ld070wx3-sl01", - .data = &lg_ld070wx3_sl01 - }, { .compatible = "lg,lh500wx1-sd03", .data = &lg_lh500wx1_sd03 }, { diff --git a/drivers/gpu/drm/panel/panel-synaptics-tddi.c b/drivers/gpu/drm/panel/panel-synaptics-tddi.c new file mode 100644 index 000000000000..0aea1854710e --- /dev/null +++ b/drivers/gpu/drm/panel/panel-synaptics-tddi.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synaptics TDDI display panel driver. + * + * Copyright (C) 2025 Kaustabh Chakraborty <kauschluss@disroot.org> + */ + +#include <linux/backlight.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +struct tddi_panel_data { + u8 lanes; + /* wait timings for panel enable */ + u8 delay_ms_sleep_exit; + u8 delay_ms_display_on; + /* wait timings for panel disable */ + u8 delay_ms_display_off; + u8 delay_ms_sleep_enter; +}; + +struct tddi_ctx { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct drm_display_mode mode; + struct backlight_device *backlight; + const struct tddi_panel_data *data; + struct regulator_bulk_data *supplies; + struct gpio_desc *reset_gpio; + struct gpio_desc *backlight_gpio; +}; + +static const struct regulator_bulk_data tddi_supplies[] = { + { .supply = "vio" }, + { .supply = "vsn" }, + { .supply = "vsp" }, +}; + +static inline struct tddi_ctx *to_tddi_ctx(struct drm_panel *panel) +{ + return container_of(panel, struct tddi_ctx, panel); +} + +static int tddi_update_status(struct backlight_device *backlight) +{ + struct tddi_ctx *ctx = bl_get_data(backlight); + struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi }; + u8 brightness = backlight_get_brightness(backlight); + + if (!ctx->panel.enabled) + return 0; + + mipi_dsi_dcs_set_display_brightness_multi(&dsi, brightness); + + return dsi.accum_err; +} + +static int tddi_prepare(struct drm_panel *panel) +{ + struct tddi_ctx *ctx = to_tddi_ctx(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(tddi_supplies), ctx->supplies); + if (ret < 0) { + dev_err(dev, "failed to enable regulators: %d\n", ret); + 
return ret; + } + + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); + + gpiod_set_value_cansleep(ctx->backlight_gpio, 0); + usleep_range(5000, 6000); + + return 0; +} + +static int tddi_unprepare(struct drm_panel *panel) +{ + struct tddi_ctx *ctx = to_tddi_ctx(panel); + + gpiod_set_value_cansleep(ctx->backlight_gpio, 1); + usleep_range(5000, 6000); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(5000, 6000); + + regulator_bulk_disable(ARRAY_SIZE(tddi_supplies), ctx->supplies); + + return 0; +} + +static int tddi_enable(struct drm_panel *panel) +{ + struct tddi_ctx *ctx = to_tddi_ctx(panel); + struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi }; + u8 brightness = ctx->backlight->props.brightness; + + mipi_dsi_dcs_write_seq_multi(&dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x0c); + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi); + mipi_dsi_msleep(&dsi, ctx->data->delay_ms_sleep_exit); + + /* sync the panel with the backlight's brightness level */ + mipi_dsi_dcs_set_display_brightness_multi(&dsi, brightness); + + mipi_dsi_dcs_set_display_on_multi(&dsi); + mipi_dsi_msleep(&dsi, ctx->data->delay_ms_display_on); + + return dsi.accum_err; +} + +static int tddi_disable(struct drm_panel *panel) +{ + struct tddi_ctx *ctx = to_tddi_ctx(panel); + struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi }; + + mipi_dsi_dcs_set_display_off_multi(&dsi); + mipi_dsi_msleep(&dsi, ctx->data->delay_ms_display_off); + + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi); + mipi_dsi_msleep(&dsi, ctx->data->delay_ms_sleep_enter); + + return dsi.accum_err; +} + +static int tddi_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct tddi_ctx *ctx = to_tddi_ctx(panel); + + return drm_connector_helper_get_modes_fixed(connector, &ctx->mode); +} + +static const struct backlight_ops tddi_bl_ops = { + .update_status = tddi_update_status, +}; + +static const struct backlight_properties tddi_bl_props = { + .type = BACKLIGHT_PLATFORM, + .brightness = 255, + .max_brightness = 255, +}; + +static const struct drm_panel_funcs tddi_drm_panel_funcs = { + .prepare = tddi_prepare, + .unprepare = tddi_unprepare, + .enable = tddi_enable, + .disable = tddi_disable, + .get_modes = tddi_get_modes, +}; + +static int tddi_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct tddi_ctx *ctx; + int ret; + + ctx = devm_drm_panel_alloc(dev, struct tddi_ctx, panel, + &tddi_drm_panel_funcs, DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ctx->data = of_device_get_match_data(dev); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(tddi_supplies), + tddi_supplies, &ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + ctx->backlight_gpio = devm_gpiod_get_optional(dev, "backlight", GPIOD_ASIS); + if (IS_ERR(ctx->backlight_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->backlight_gpio), + "failed to get backlight-gpios\n"); + + ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "failed to get reset-gpios\n"); + + ret = of_get_drm_panel_display_mode(dev->of_node, &ctx->mode, NULL); + if (ret < 0) + return 
dev_err_probe(dev, ret, "failed to get panel timings\n"); + + ctx->backlight = devm_backlight_device_register(dev, dev_name(dev), dev, + ctx, &tddi_bl_ops, + &tddi_bl_props); + if (IS_ERR(ctx->backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->backlight), + "failed to register backlight device"); + + dsi->lanes = ctx->data->lanes; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_VIDEO_NO_HFP; + + ctx->panel.prepare_prev_first = true; + drm_panel_add(&ctx->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "failed to attach to DSI host\n"); + } + + return 0; +} + +static void tddi_remove(struct mipi_dsi_device *dsi) +{ + struct tddi_ctx *ctx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&ctx->panel); +} + +static const struct tddi_panel_data td4101_panel_data = { + .lanes = 2, + /* wait timings for panel enable */ + .delay_ms_sleep_exit = 100, + .delay_ms_display_on = 0, + /* wait timings for panel disable */ + .delay_ms_display_off = 20, + .delay_ms_sleep_enter = 90, +}; + +static const struct tddi_panel_data td4300_panel_data = { + .lanes = 4, + /* wait timings for panel enable */ + .delay_ms_sleep_exit = 100, + .delay_ms_display_on = 0, + /* wait timings for panel disable */ + .delay_ms_display_off = 0, + .delay_ms_sleep_enter = 0, +}; + +static const struct of_device_id tddi_of_device_id[] = { + { + .compatible = "syna,td4101-panel", + .data = &td4101_panel_data, + }, { + .compatible = "syna,td4300-panel", + .data = &td4300_panel_data, + }, { } +}; +MODULE_DEVICE_TABLE(of, tddi_of_device_id); + +static struct mipi_dsi_driver tddi_dsi_driver = { + .probe = tddi_probe, + .remove = tddi_remove, + .driver = { + .name = "panel-synaptics-tddi", + .of_match_table = tddi_of_device_id, + }, +}; +module_mipi_dsi_driver(tddi_dsi_driver); + +MODULE_AUTHOR("Kaustabh Chakraborty <kauschluss@disroot.org>"); +MODULE_DESCRIPTION("Synaptics TDDI Display Panel Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c index 909c280eab1f..e5e688cf98fd 100644 --- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c +++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c @@ -3,6 +3,7 @@ * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
*/ +#include <linux/backlight.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/property.h> @@ -20,6 +21,8 @@ struct visionox_rm69299_panel_desc { const struct drm_display_mode *mode; const u8 *init_seq; unsigned int init_seq_len; + int max_brightness; + int initial_brightness; }; struct visionox_rm69299 { @@ -192,7 +195,7 @@ static int visionox_rm69299_unprepare(struct drm_panel *panel) struct visionox_rm69299 *ctx = panel_to_ctx(panel); struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; - ctx->dsi->mode_flags = 0; + ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); @@ -247,7 +250,7 @@ static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = { }; static const struct drm_display_mode visionox_rm69299_1080x2160_60hz = { - .clock = 158695, + .clock = (2160 + 8 + 4 + 4) * (1080 + 26 + 2 + 36) * 60 / 1000, .hdisplay = 1080, .hsync_start = 1080 + 26, .hsync_end = 1080 + 26 + 2, @@ -285,6 +288,63 @@ static const struct drm_panel_funcs visionox_rm69299_drm_funcs = { .get_modes = visionox_rm69299_get_modes, }; +static int visionox_rm69299_bl_get_brightness(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return brightness; +} + +static int visionox_rm69299_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static const struct backlight_ops visionox_rm69299_bl_ops = { + .update_status = visionox_rm69299_bl_update_status, + .get_brightness = visionox_rm69299_bl_get_brightness, +}; + +static struct backlight_device * +visionox_rm69299_create_backlight(struct visionox_rm69299 *ctx) +{ + struct device *dev = &ctx->dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = ctx->desc->initial_brightness, + .max_brightness = ctx->desc->max_brightness, + }; + + if (!ctx->desc->max_brightness) + return 0; + + return devm_backlight_device_register(dev, dev_name(dev), dev, ctx->dsi, + &visionox_rm69299_bl_ops, + &props); +} + static int visionox_rm69299_probe(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; @@ -316,6 +376,11 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi) return PTR_ERR(ctx->reset_gpio); } + ctx->panel.backlight = visionox_rm69299_create_backlight(ctx); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + drm_panel_add(&ctx->panel); dsi->lanes = 4; @@ -353,6 +418,8 @@ const struct visionox_rm69299_panel_desc visionox_rm69299_shift_desc = { .mode = &visionox_rm69299_1080x2160_60hz, .init_seq = (const u8 *)visionox_rm69299_1080x2160_60hz_init_seq, .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2160_60hz_init_seq), + .max_brightness = 255, + .initial_brightness = 50, }; static const struct of_device_id visionox_rm69299_of_match[] = { diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 5d0dce10336b..b51c30778811 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -8,6 +8,8 @@ #include <linux/platform_device.h> #include <linux/pm_opp.h> +#include <drm/drm_print.h> + #include "panfrost_device.h" #include "panfrost_devfreq.h" @@ -74,7 +76,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev, spin_unlock_irqrestore(&pfdevfreq->lock, irqflags); - dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", + dev_dbg(pfdev->base.dev, "busy %lu total %lu %lu %% freq %lu MHz\n", status->busy_time, status->total_time, status->busy_time / (status->total_time / 100), status->current_frequency / 1000 / 1000); @@ -119,7 +121,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) int ret; struct dev_pm_opp *opp; unsigned long cur_freq; - struct device *dev = &pfdev->pdev->dev; + struct device *dev = pfdev->base.dev; struct devfreq *devfreq; struct thermal_cooling_device *cooling; struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq; diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index 04bec27449cb..c61b97af120c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -20,9 +20,9 @@ static int panfrost_reset_init(struct panfrost_device *pfdev) { - pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev); + pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->base.dev); if (IS_ERR(pfdev->rstc)) { - dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc)); + dev_err(pfdev->base.dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc)); return PTR_ERR(pfdev->rstc); } @@ -39,22 +39,22 @@ static int panfrost_clk_init(struct panfrost_device *pfdev) int err; unsigned long rate; - pfdev->clock = devm_clk_get(pfdev->dev, NULL); + pfdev->clock = devm_clk_get(pfdev->base.dev, NULL); if (IS_ERR(pfdev->clock)) { - dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock)); + dev_err(pfdev->base.dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock)); return PTR_ERR(pfdev->clock); } rate = clk_get_rate(pfdev->clock); - dev_info(pfdev->dev, "clock rate = %lu\n", rate); + dev_info(pfdev->base.dev, "clock rate = %lu\n", rate); err = clk_prepare_enable(pfdev->clock); if (err) return err; - pfdev->bus_clock = devm_clk_get_optional(pfdev->dev, "bus"); + pfdev->bus_clock = devm_clk_get_optional(pfdev->base.dev, "bus"); if (IS_ERR(pfdev->bus_clock)) { - dev_err(pfdev->dev, "get bus_clock failed %ld\n", + dev_err(pfdev->base.dev, "get bus_clock failed %ld\n", PTR_ERR(pfdev->bus_clock)); err = PTR_ERR(pfdev->bus_clock); goto disable_clock; @@ -62,7 +62,7 @@ static int panfrost_clk_init(struct panfrost_device *pfdev) if (pfdev->bus_clock) { rate = clk_get_rate(pfdev->bus_clock); - dev_info(pfdev->dev, "bus_clock rate = %lu\n", rate); + dev_info(pfdev->base.dev, "bus_clock rate = %lu\n", rate); err = clk_prepare_enable(pfdev->bus_clock); if (err) @@ -87,7 +87,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) { int ret, i; - pfdev->regulators = devm_kcalloc(pfdev->dev, pfdev->comp->num_supplies, + pfdev->regulators = devm_kcalloc(pfdev->base.dev, pfdev->comp->num_supplies, sizeof(*pfdev->regulators), GFP_KERNEL); if (!pfdev->regulators) @@ -96,12 +96,12 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) for (i = 0; i < pfdev->comp->num_supplies; i++) pfdev->regulators[i].supply = pfdev->comp->supply_names[i]; - ret = devm_regulator_bulk_get(pfdev->dev, + ret = devm_regulator_bulk_get(pfdev->base.dev, pfdev->comp->num_supplies, 
pfdev->regulators); if (ret < 0) { if (ret != -EPROBE_DEFER) - dev_err(pfdev->dev, "failed to get regulators: %d\n", + dev_err(pfdev->base.dev, "failed to get regulators: %d\n", ret); return ret; } @@ -109,7 +109,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) ret = regulator_bulk_enable(pfdev->comp->num_supplies, pfdev->regulators); if (ret < 0) { - dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret); + dev_err(pfdev->base.dev, "failed to enable regulators: %d\n", ret); return ret; } @@ -144,7 +144,7 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev) int err; int i, num_domains; - num_domains = of_count_phandle_with_args(pfdev->dev->of_node, + num_domains = of_count_phandle_with_args(pfdev->base.dev->of_node, "power-domains", "#power-domain-cells"); @@ -156,7 +156,7 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev) return 0; if (num_domains != pfdev->comp->num_pm_domains) { - dev_err(pfdev->dev, + dev_err(pfdev->base.dev, "Incorrect number of power domains: %d provided, %d needed\n", num_domains, pfdev->comp->num_pm_domains); return -EINVAL; @@ -168,20 +168,21 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev) for (i = 0; i < num_domains; i++) { pfdev->pm_domain_devs[i] = - dev_pm_domain_attach_by_name(pfdev->dev, - pfdev->comp->pm_domain_names[i]); + dev_pm_domain_attach_by_name(pfdev->base.dev, + pfdev->comp->pm_domain_names[i]); if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) { err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA; pfdev->pm_domain_devs[i] = NULL; - dev_err(pfdev->dev, + dev_err(pfdev->base.dev, "failed to get pm-domain %s(%d): %d\n", pfdev->comp->pm_domain_names[i], i, err); goto err; } - pfdev->pm_domain_links[i] = device_link_add(pfdev->dev, - pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME | - DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE); + pfdev->pm_domain_links[i] = + device_link_add(pfdev->base.dev, + pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME | + DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE); if (!pfdev->pm_domain_links[i]) { dev_err(pfdev->pm_domain_devs[i], "adding device link failed!\n"); @@ -220,20 +221,20 @@ int panfrost_device_init(struct panfrost_device *pfdev) err = panfrost_reset_init(pfdev); if (err) { - dev_err(pfdev->dev, "reset init failed %d\n", err); + dev_err(pfdev->base.dev, "reset init failed %d\n", err); goto out_pm_domain; } err = panfrost_clk_init(pfdev); if (err) { - dev_err(pfdev->dev, "clk init failed %d\n", err); + dev_err(pfdev->base.dev, "clk init failed %d\n", err); goto out_reset; } err = panfrost_devfreq_init(pfdev); if (err) { if (err != -EPROBE_DEFER) - dev_err(pfdev->dev, "devfreq init failed %d\n", err); + dev_err(pfdev->base.dev, "devfreq init failed %d\n", err); goto out_clk; } @@ -244,7 +245,7 @@ int panfrost_device_init(struct panfrost_device *pfdev) goto out_devfreq; } - pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0); + pfdev->iomem = devm_platform_ioremap_resource(to_platform_device(pfdev->base.dev), 0); if (IS_ERR(pfdev->iomem)) { err = PTR_ERR(pfdev->iomem); goto out_regulator; @@ -258,7 +259,7 @@ int panfrost_device_init(struct panfrost_device *pfdev) if (err) goto out_gpu; - err = panfrost_job_init(pfdev); + err = panfrost_jm_init(pfdev); if (err) goto out_mmu; @@ -268,7 +269,7 @@ int panfrost_device_init(struct panfrost_device *pfdev) return 0; out_job: - panfrost_job_fini(pfdev); + panfrost_jm_fini(pfdev); out_mmu: panfrost_mmu_fini(pfdev); out_gpu: @@ -289,7 +290,7 @@ out_pm_domain: void panfrost_device_fini(struct 
panfrost_device *pfdev) { panfrost_perfcnt_fini(pfdev); - panfrost_job_fini(pfdev); + panfrost_jm_fini(pfdev); panfrost_mmu_fini(pfdev); panfrost_gpu_fini(pfdev); panfrost_devfreq_fini(pfdev); @@ -399,13 +400,16 @@ bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev, return false; } -void panfrost_device_reset(struct panfrost_device *pfdev) +void panfrost_device_reset(struct panfrost_device *pfdev, bool enable_job_int) { panfrost_gpu_soft_reset(pfdev); panfrost_gpu_power_on(pfdev); panfrost_mmu_reset(pfdev); - panfrost_job_enable_interrupts(pfdev); + + panfrost_jm_reset_interrupts(pfdev); + if (enable_job_int) + panfrost_jm_enable_interrupts(pfdev); } static int panfrost_device_runtime_resume(struct device *dev) @@ -429,7 +433,7 @@ static int panfrost_device_runtime_resume(struct device *dev) } } - panfrost_device_reset(pfdev); + panfrost_device_reset(pfdev, true); panfrost_devfreq_resume(pfdev); return 0; @@ -447,11 +451,11 @@ static int panfrost_device_runtime_suspend(struct device *dev) { struct panfrost_device *pfdev = dev_get_drvdata(dev); - if (!panfrost_job_is_idle(pfdev)) + if (!panfrost_jm_is_idle(pfdev)) return -EBUSY; panfrost_devfreq_suspend(pfdev); - panfrost_job_suspend_irq(pfdev); + panfrost_jm_suspend_irq(pfdev); panfrost_mmu_suspend_irq(pfdev); panfrost_gpu_suspend_irq(pfdev); panfrost_gpu_power_off(pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 077525a3ad68..e61c4329fd07 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -10,11 +10,13 @@ #include <linux/pm.h> #include <linux/regulator/consumer.h> #include <linux/spinlock.h> +#include <drm/drm_auth.h> #include <drm/drm_device.h> #include <drm/drm_mm.h> #include <drm/gpu_scheduler.h> #include "panfrost_devfreq.h" +#include "panfrost_job.h" struct panfrost_device; struct panfrost_mmu; @@ -22,9 +24,12 @@ struct panfrost_job_slot; struct panfrost_job; struct panfrost_perfcnt; -#define NUM_JOB_SLOTS 3 #define MAX_PM_DOMAINS 5 +#define ALL_JS_INT_MASK \ + (GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | \ + GENMASK(NUM_JOB_SLOTS - 1, 0)) + enum panfrost_drv_comp_bits { PANFROST_COMP_BIT_GPU, PANFROST_COMP_BIT_JOB, @@ -123,9 +128,7 @@ struct panfrost_device_debugfs { }; struct panfrost_device { - struct device *dev; - struct drm_device *ddev; - struct platform_device *pdev; + struct drm_device base; int gpu_irq; int mmu_irq; @@ -144,7 +147,6 @@ struct panfrost_device { DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX); spinlock_t as_lock; - unsigned long as_in_use_mask; unsigned long as_alloc_mask; unsigned long as_faulty_mask; struct list_head as_lru_list; @@ -206,16 +208,22 @@ struct panfrost_engine_usage { struct panfrost_file_priv { struct panfrost_device *pfdev; - struct drm_sched_entity sched_entity[NUM_JOB_SLOTS]; + struct xarray jm_ctxs; struct panfrost_mmu *mmu; struct panfrost_engine_usage engine_usage; }; +static inline bool panfrost_high_prio_allowed(struct drm_file *file) +{ + /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */ + return (capable(CAP_SYS_NICE) || drm_is_current_master(file)); +} + static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev) { - return ddev->dev_private; + return container_of(ddev, struct panfrost_device, base); } static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id) @@ -241,7 +249,7 @@ int panfrost_unstable_ioctl_check(void); int panfrost_device_init(struct panfrost_device *pfdev); void 
panfrost_device_fini(struct panfrost_device *pfdev); -void panfrost_device_reset(struct panfrost_device *pfdev); +void panfrost_device_reset(struct panfrost_device *pfdev, bool enable_job_int); extern const struct dev_pm_ops panfrost_pm_ops; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 1ea6c509a5d5..7d8c7c337606 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -16,6 +16,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_ioctl.h> +#include <drm/drm_print.h> #include <drm/drm_syncobj.h> #include <drm/drm_utils.h> @@ -36,7 +37,7 @@ static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev, { int ret; - ret = pm_runtime_resume_and_get(pfdev->dev); + ret = pm_runtime_resume_and_get(pfdev->base.dev); if (ret) return ret; @@ -44,14 +45,14 @@ static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev, *arg = panfrost_timestamp_read(pfdev); panfrost_cycle_counter_put(pfdev); - pm_runtime_put(pfdev->dev); + pm_runtime_put(pfdev->base.dev); return 0; } static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file) { struct drm_panfrost_get_param *param = data; - struct panfrost_device *pfdev = ddev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(ddev); int ret; if (param->pad != 0) @@ -109,6 +110,14 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct #endif break; + case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES: + param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) | + BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM); + + if (panfrost_high_prio_allowed(file)) + param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH); + break; + default: return -EINVAL; } @@ -275,13 +284,17 @@ fail: static int panfrost_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_file_priv *file_priv = file->driver_priv; struct drm_panfrost_submit *args = data; struct drm_syncobj *sync_out = NULL; + struct panfrost_jm_ctx *jm_ctx; struct panfrost_job *job; int ret = 0, slot; + if (args->pad) + return -EINVAL; + if (!args->jc) return -EINVAL; @@ -294,10 +307,16 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, return -ENODEV; } + jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle); + if (!jm_ctx) { + ret = -EINVAL; + goto out_put_syncout; + } + job = kzalloc(sizeof(*job), GFP_KERNEL); if (!job) { ret = -ENOMEM; - goto out_put_syncout; + goto out_put_jm_ctx; } kref_init(&job->refcount); @@ -307,12 +326,13 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, job->requirements = args->requirements; job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); job->mmu = file_priv->mmu; + job->ctx = panfrost_jm_ctx_get(jm_ctx); job->engine_usage = &file_priv->engine_usage; slot = panfrost_job_get_slot(job); ret = drm_sched_job_init(&job->base, - &file_priv->sched_entity[slot], + &jm_ctx->slot_entity[slot], 1, NULL, file->client_id); if (ret) goto out_put_job; @@ -338,6 +358,8 @@ out_cleanup_job: drm_sched_job_cleanup(&job->base); out_put_job: panfrost_job_put(job); +out_put_jm_ctx: + panfrost_jm_ctx_put(jm_ctx); out_put_syncout: if (sync_out) drm_syncobj_put(sync_out); @@ -436,7 +458,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, { struct panfrost_file_priv *priv = 
file_priv->driver_priv; struct drm_panfrost_madvise *args = data; - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct drm_gem_object *gem_obj; struct panfrost_gem_object *bo; int ret = 0; @@ -536,6 +558,27 @@ err_put_obj: return ret; } +static int panfrost_ioctl_jm_ctx_create(struct drm_device *dev, void *data, + struct drm_file *file) +{ + return panfrost_jm_ctx_create(file, data); +} + +static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data, + struct drm_file *file) +{ + const struct drm_panfrost_jm_ctx_destroy *args = data; + + if (args->pad) + return -EINVAL; + + /* We can't destroy the default context created when the file is opened. */ + if (!args->handle) + return -EINVAL; + + return panfrost_jm_ctx_destroy(file, args->handle); +} + int panfrost_unstable_ioctl_check(void) { if (!unstable_ioctls) @@ -548,7 +591,7 @@ static int panfrost_open(struct drm_device *dev, struct drm_file *file) { int ret; - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_file_priv *panfrost_priv; panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL); @@ -564,7 +607,7 @@ panfrost_open(struct drm_device *dev, struct drm_file *file) goto err_free; } - ret = panfrost_job_open(panfrost_priv); + ret = panfrost_jm_open(file); if (ret) goto err_job; @@ -583,7 +626,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file) struct panfrost_file_priv *panfrost_priv = file->driver_priv; panfrost_perfcnt_close(file); - panfrost_job_close(panfrost_priv); + panfrost_jm_close(file); panfrost_mmu_ctx_put(panfrost_priv->mmu); kfree(panfrost_priv); @@ -603,6 +646,8 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = { PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW), PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW), + PANFROST_IOCTL(JM_CTX_CREATE, jm_ctx_create, DRM_RENDER_ALLOW), + PANFROST_IOCTL(JM_CTX_DESTROY, jm_ctx_destroy, DRM_RENDER_ALLOW), }; static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev, @@ -624,30 +669,25 @@ static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev, * job spent on the GPU. 
*/ - static const char * const engine_names[] = { - "fragment", "vertex-tiler", "compute-only" - }; - - BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS); - for (i = 0; i < NUM_JOB_SLOTS - 1; i++) { if (pfdev->profile_mode) { drm_printf(p, "drm-engine-%s:\t%llu ns\n", - engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]); + panfrost_engine_names[i], + panfrost_priv->engine_usage.elapsed_ns[i]); drm_printf(p, "drm-cycles-%s:\t%llu\n", - engine_names[i], panfrost_priv->engine_usage.cycles[i]); + panfrost_engine_names[i], + panfrost_priv->engine_usage.cycles[i]); } drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n", - engine_names[i], pfdev->pfdevfreq.fast_rate); + panfrost_engine_names[i], pfdev->pfdevfreq.fast_rate); drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n", - engine_names[i], pfdev->pfdevfreq.current_frequency); + panfrost_engine_names[i], pfdev->pfdevfreq.current_frequency); } } static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file) { - struct drm_device *dev = file->minor->dev; - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(file->minor->dev); panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p); @@ -664,16 +704,57 @@ static const struct file_operations panfrost_drm_driver_fops = { static int panthor_gems_show(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(node->minor->dev); panfrost_gem_debugfs_print_bos(pfdev, m); return 0; } +static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle, + struct seq_file *m) +{ + struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev; + const char *prio = "UNKNOWN"; + + static const char * const prios[] = { + [DRM_SCHED_PRIORITY_HIGH] = "HIGH", + [DRM_SCHED_PRIORITY_NORMAL] = "NORMAL", + [DRM_SCHED_PRIORITY_LOW] = "LOW", + }; + + if (jm_ctx->slot_entity[0].priority != + jm_ctx->slot_entity[1].priority) + drm_warn(ddev, "Slot priorities should be the same in a single context"); + + if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios)) + prio = prios[jm_ctx->slot_entity[0].priority]; + + seq_printf(m, " JM context %u: priority %s\n", handle, prio); +} + +static int show_file_jm_ctxs(struct panfrost_file_priv *pfile, + struct seq_file *m) +{ + struct panfrost_jm_ctx *jm_ctx; + unsigned long i; + + xa_lock(&pfile->jm_ctxs); + xa_for_each(&pfile->jm_ctxs, i, jm_ctx) { + jm_ctx = panfrost_jm_ctx_get(jm_ctx); + xa_unlock(&pfile->jm_ctxs); + show_panfrost_jm_ctx(jm_ctx, i, m); + panfrost_jm_ctx_put(jm_ctx); + xa_lock(&pfile->jm_ctxs); + } + xa_unlock(&pfile->jm_ctxs); + + return 0; +} + static struct drm_info_list panthor_debugfs_list[] = { - {"gems", panthor_gems_show, 0, NULL}, + {"gems", + panthor_gems_show, 0, NULL}, }; static int panthor_gems_debugfs_init(struct drm_minor *minor) @@ -685,9 +766,64 @@ static int panthor_gems_debugfs_init(struct drm_minor *minor) return 0; } +static int show_each_file(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *ddev = node->minor->dev; + int (*show)(struct panfrost_file_priv *, struct seq_file *) = + node->info_ent->data; + struct drm_file *file; + int ret; + + ret = mutex_lock_interruptible(&ddev->filelist_mutex); + if (ret) + return ret; + + list_for_each_entry(file, &ddev->filelist, lhead) { + struct task_struct *task; + struct 
panfrost_file_priv *pfile = file->driver_priv; + struct pid *pid; + + /* + * Although we have a valid reference on file->pid, that does + * not guarantee that the task_struct who called get_pid() is + * still alive (e.g. get_pid(current) => fork() => exit()). + * Therefore, we need to protect this ->comm access using RCU. + */ + rcu_read_lock(); + pid = rcu_dereference(file->pid); + task = pid_task(pid, PIDTYPE_TGID); + seq_printf(m, "client_id %8llu pid %8d command %s:\n", + file->client_id, pid_nr(pid), + task ? task->comm : "<unknown>"); + rcu_read_unlock(); + + ret = show(pfile, m); + if (ret < 0) + break; + + seq_puts(m, "\n"); + } + + mutex_unlock(&ddev->filelist_mutex); + return ret; +} + +static struct drm_info_list panfrost_sched_debugfs_list[] = { + { "sched_ctxs", show_each_file, 0, show_file_jm_ctxs }, +}; + +static void panfrost_sched_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(panfrost_sched_debugfs_list, + ARRAY_SIZE(panfrost_sched_debugfs_list), + minor->debugfs_root, minor); +} + static void panfrost_debugfs_init(struct drm_minor *minor) { panthor_gems_debugfs_init(minor); + panfrost_sched_debugfs_init(minor); } #endif @@ -699,6 +835,8 @@ static void panfrost_debugfs_init(struct drm_minor *minor) * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT * - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries * - 1.4 - adds SET_LABEL_BO + * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls and extend SUBMIT to allow + * context creation with configurable priorities/affinity */ static const struct drm_driver panfrost_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, @@ -711,7 +849,7 @@ static const struct drm_driver panfrost_drm_driver = { .name = "panfrost", .desc = "panfrost DRM", .major = 1, - .minor = 4, + .minor = 5, .gem_create_object = panfrost_gem_create_object, .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, @@ -723,15 +861,12 @@ static const struct drm_driver panfrost_drm_driver = { static int panfrost_probe(struct platform_device *pdev) { struct panfrost_device *pfdev; - struct drm_device *ddev; int err; - pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL); - if (!pfdev) - return -ENOMEM; - - pfdev->pdev = pdev; - pfdev->dev = &pdev->dev; + pfdev = devm_drm_dev_alloc(&pdev->dev, &panfrost_drm_driver, + struct panfrost_device, base); + if (IS_ERR(pfdev)) + return PTR_ERR(pfdev); platform_set_drvdata(pdev, pfdev); @@ -741,14 +876,6 @@ static int panfrost_probe(struct platform_device *pdev) pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT; - /* Allocate and initialize the DRM device. 
*/ - ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev); - if (IS_ERR(ddev)) - return PTR_ERR(ddev); - - ddev->dev_private = pfdev; - pfdev->ddev = ddev; - mutex_init(&pfdev->shrinker_lock); INIT_LIST_HEAD(&pfdev->shrinker_list); @@ -759,51 +886,47 @@ static int panfrost_probe(struct platform_device *pdev) goto err_out0; } - pm_runtime_set_active(pfdev->dev); - pm_runtime_mark_last_busy(pfdev->dev); - pm_runtime_enable(pfdev->dev); - pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */ - pm_runtime_use_autosuspend(pfdev->dev); + pm_runtime_set_active(pfdev->base.dev); + pm_runtime_mark_last_busy(pfdev->base.dev); + pm_runtime_enable(pfdev->base.dev); + pm_runtime_set_autosuspend_delay(pfdev->base.dev, 50); /* ~3 frames */ + pm_runtime_use_autosuspend(pfdev->base.dev); /* * Register the DRM device with the core and the connectors with * sysfs */ - err = drm_dev_register(ddev, 0); + err = drm_dev_register(&pfdev->base, 0); if (err < 0) goto err_out1; - err = panfrost_gem_shrinker_init(ddev); + err = panfrost_gem_shrinker_init(&pfdev->base); if (err) goto err_out2; return 0; err_out2: - drm_dev_unregister(ddev); + drm_dev_unregister(&pfdev->base); err_out1: - pm_runtime_disable(pfdev->dev); + pm_runtime_disable(pfdev->base.dev); panfrost_device_fini(pfdev); - pm_runtime_set_suspended(pfdev->dev); + pm_runtime_set_suspended(pfdev->base.dev); err_out0: - drm_dev_put(ddev); return err; } static void panfrost_remove(struct platform_device *pdev) { struct panfrost_device *pfdev = platform_get_drvdata(pdev); - struct drm_device *ddev = pfdev->ddev; - drm_dev_unregister(ddev); - panfrost_gem_shrinker_cleanup(ddev); + drm_dev_unregister(&pfdev->base); + panfrost_gem_shrinker_cleanup(&pfdev->base); - pm_runtime_get_sync(pfdev->dev); - pm_runtime_disable(pfdev->dev); + pm_runtime_get_sync(pfdev->base.dev); + pm_runtime_disable(pfdev->base.dev); panfrost_device_fini(pfdev); - pm_runtime_set_suspended(pfdev->dev); - - drm_dev_put(ddev); + pm_runtime_set_suspended(pfdev->base.dev); } static ssize_t profiling_show(struct device *dev, diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c index 4042afe2fbf4..3ed6c902d0a1 100644 --- a/drivers/gpu/drm/panfrost/panfrost_dump.c +++ b/drivers/gpu/drm/panfrost/panfrost_dump.c @@ -163,7 +163,7 @@ void panfrost_core_dump(struct panfrost_job *job) iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!iter.start) { - dev_warn(pfdev->dev, "failed to allocate devcoredump file\n"); + dev_warn(pfdev->base.dev, "failed to allocate devcoredump file\n"); return; } @@ -204,14 +204,14 @@ void panfrost_core_dump(struct panfrost_job *job) mapping = job->mappings[i]; if (!bo->base.sgt) { - dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n"); + dev_err(pfdev->base.dev, "Panfrost Dump: BO has no sgt, cannot dump\n"); iter.hdr->bomap.valid = 0; goto dump_header; } ret = drm_gem_vmap(&bo->base.base, &map); if (ret) { - dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n"); + dev_err(pfdev->base.dev, "Panfrost Dump: couldn't map Buffer Object\n"); iter.hdr->bomap.valid = 0; goto dump_header; } @@ -237,5 +237,5 @@ dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data + } panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data); - dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL); + dev_coredumpv(pfdev->base.dev, iter.start, iter.data - iter.start, GFP_KERNEL); } diff --git 
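The probe/remove rework above converts panfrost from a separately allocated drm_device hanging off dev_private to a drm_device embedded in struct panfrost_device, with devm_drm_dev_alloc() tying the allocation to the DRM refcount and to_panfrost_device() upcasting via container_of(). A minimal sketch of that pattern, using illustrative my_device/my_driver names rather than the panfrost code itself:

#include <linux/platform_device.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>

struct my_device {
	struct drm_device base;		/* embedded DRM device, not a pointer */
	void __iomem *iomem;
};

static inline struct my_device *to_my_device(struct drm_device *ddev)
{
	return container_of(ddev, struct my_device, base);
}

static const struct drm_driver my_driver = {
	.name = "my-driver",
};

static int my_probe(struct platform_device *pdev)
{
	struct my_device *mdev;

	/* Allocates the containing struct with .base initialised; the
	 * drm_device refcount owns the final free, so no drm_dev_put()
	 * is needed in the error paths or in remove().
	 */
	mdev = devm_drm_dev_alloc(&pdev->dev, &my_driver,
				  struct my_device, base);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	platform_set_drvdata(pdev, mdev);

	return drm_dev_register(&mdev->base, 0);
}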
a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 85d6289a6eda..8041b65c6609 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -8,6 +8,7 @@ #include <linux/dma-mapping.h> #include <drm/panfrost_drm.h> +#include <drm/drm_print.h> #include "panfrost_device.h" #include "panfrost_gem.h" #include "panfrost_mmu.h" @@ -26,7 +27,7 @@ static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev, static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) { - struct panfrost_device *pfdev = bo->base.base.dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(bo->base.base.dev); if (list_empty(&bo->debugfs.node)) return; @@ -48,7 +49,7 @@ static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {} static void panfrost_gem_free_object(struct drm_gem_object *obj) { struct panfrost_gem_object *bo = to_panfrost_bo(obj); - struct panfrost_device *pfdev = obj->dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(obj->dev); /* * Make sure the BO is no longer inserted in the shrinker list before @@ -76,7 +77,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) for (i = 0; i < n_sgt; i++) { if (bo->sgts[i].sgl) { - dma_unmap_sgtable(pfdev->dev, &bo->sgts[i], + dma_unmap_sgtable(pfdev->base.dev, &bo->sgts[i], DMA_BIDIRECTIONAL, 0); sg_free_table(&bo->sgts[i]); } @@ -284,7 +285,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { */ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_gem_object *obj; obj = kzalloc(sizeof(*obj), GFP_KERNEL); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 02b60ea1433a..2fe967a90bcb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -97,7 +97,7 @@ panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) */ int panfrost_gem_shrinker_init(struct drm_device *dev) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); pfdev->shrinker = shrinker_alloc(0, "drm-panfrost"); if (!pfdev->shrinker) @@ -120,7 +120,7 @@ int panfrost_gem_shrinker_init(struct drm_device *dev) */ void panfrost_gem_shrinker_cleanup(struct drm_device *dev) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); if (pfdev->shrinker) shrinker_free(pfdev->shrinker); diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 174e190ba40f..483d278eb154 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -12,6 +12,8 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <drm/drm_print.h> + #include "panfrost_device.h" #include "panfrost_features.h" #include "panfrost_issues.h" @@ -36,12 +38,12 @@ static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data) u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32; address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO); - dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n", + dev_warn(pfdev->base.dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n", fault_status, panfrost_exception_name(fault_status & 0xFF), address); if (state & 
GPU_IRQ_MULTIPLE_FAULT) - dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n"); + dev_warn(pfdev->base.dev, "There were multiple GPU faults - some have not been reported\n"); gpu_write(pfdev, GPU_INT_MASK, 0); } @@ -72,13 +74,13 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000); if (ret) { - dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n"); + dev_err(pfdev->base.dev, "gpu soft reset timed out, attempting hard reset\n"); gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET); ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000); if (ret) { - dev_err(pfdev->dev, "gpu hard reset timed out\n"); + dev_err(pfdev->base.dev, "gpu hard reset timed out\n"); return ret; } } @@ -95,7 +97,7 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) * All in-flight jobs should have released their cycle * counter references upon reset, but let us make sure */ - if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0)) + if (drm_WARN_ON(&pfdev->base, atomic_read(&pfdev->cycle_counter.use_count) != 0)) atomic_set(&pfdev->cycle_counter.use_count, 0); return 0; @@ -240,9 +242,10 @@ static const struct panfrost_model gpu_models[] = { /* MediaTek MT8188 Mali-G57 MC3 */ GPU_MODEL(g57, 0x9093, GPU_REV(g57, 0, 0)), + {0}, }; -static void panfrost_gpu_init_features(struct panfrost_device *pfdev) +static int panfrost_gpu_init_features(struct panfrost_device *pfdev) { u32 gpu_id, num_js, major, minor, status, rev; const char *name = "unknown"; @@ -327,16 +330,22 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) break; } + if (!model->name) { + dev_err(pfdev->base.dev, "GPU model not found: mali-%s id rev %#x %#x\n", + name, gpu_id, rev); + return -ENODEV; + } + bitmap_from_u64(pfdev->features.hw_features, hw_feat); bitmap_from_u64(pfdev->features.hw_issues, hw_issues); - dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x", + dev_info(pfdev->base.dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x", name, gpu_id, major, minor, status); - dev_info(pfdev->dev, "features: %64pb, issues: %64pb", + dev_info(pfdev->base.dev, "features: %64pb, issues: %64pb", pfdev->features.hw_features, pfdev->features.hw_issues); - dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x", + dev_info(pfdev->base.dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x", pfdev->features.l2_features, pfdev->features.core_features, pfdev->features.tiler_features, @@ -345,8 +354,10 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) pfdev->features.as_present, pfdev->features.js_present); - dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx", + dev_info(pfdev->base.dev, "shader_present=0x%0llx l2_present=0x%0llx", pfdev->features.shader_present, pfdev->features.l2_present); + + return 0; } void panfrost_cycle_counter_get(struct panfrost_device *pfdev) @@ -411,7 +422,7 @@ static u64 panfrost_get_core_mask(struct panfrost_device *pfdev) */ core_mask = ~(pfdev->features.l2_present - 1) & (pfdev->features.l2_present - 2); - dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n", + dev_info_once(pfdev->base.dev, "using only 1st core group (%lu cores from %lu)\n", hweight64(core_mask), hweight64(pfdev->features.shader_present)); @@ -432,7 +443,7 @@ void 
panfrost_gpu_power_on(struct panfrost_device *pfdev) val, val == (pfdev->features.l2_present & core_mask), 10, 20000); if (ret) - dev_err(pfdev->dev, "error powering up gpu L2"); + dev_err(pfdev->base.dev, "error powering up gpu L2"); gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present & core_mask); @@ -440,13 +451,13 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) val, val == (pfdev->features.shader_present & core_mask), 10, 20000); if (ret) - dev_err(pfdev->dev, "error powering up gpu shader"); + dev_err(pfdev->base.dev, "error powering up gpu shader"); gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO, val, val == pfdev->features.tiler_present, 10, 1000); if (ret) - dev_err(pfdev->dev, "error powering up gpu tiler"); + dev_err(pfdev->base.dev, "error powering up gpu tiler"); } void panfrost_gpu_power_off(struct panfrost_device *pfdev) @@ -458,19 +469,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev) ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO, val, !val, 1, 2000); if (ret) - dev_err(pfdev->dev, "shader power transition timeout"); + dev_err(pfdev->base.dev, "shader power transition timeout"); gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO, val, !val, 1, 2000); if (ret) - dev_err(pfdev->dev, "tiler power transition timeout"); + dev_err(pfdev->base.dev, "tiler power transition timeout"); gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present); ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO, val, !val, 0, 2000); if (ret) - dev_err(pfdev->dev, "l2 power transition timeout"); + dev_err(pfdev->base.dev, "l2 power transition timeout"); } void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev) @@ -489,23 +500,26 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) if (err) return err; - panfrost_gpu_init_features(pfdev); + err = panfrost_gpu_init_features(pfdev); + if (err) + return err; - err = dma_set_mask_and_coherent(pfdev->dev, - DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); + err = dma_set_mask_and_coherent(pfdev->base.dev, + DMA_BIT_MASK(FIELD_GET(0xff00, + pfdev->features.mmu_features))); if (err) return err; - dma_set_max_seg_size(pfdev->dev, UINT_MAX); + dma_set_max_seg_size(pfdev->base.dev, UINT_MAX); - pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); + pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "gpu"); if (pfdev->gpu_irq < 0) return pfdev->gpu_irq; - err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler, + err = devm_request_irq(pfdev->base.dev, pfdev->gpu_irq, panfrost_gpu_irq_handler, IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev); if (err) { - dev_err(pfdev->dev, "failed to request gpu irq"); + dev_err(pfdev->base.dev, "failed to request gpu irq"); return err; } @@ -525,9 +539,9 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev) if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) { /* Flush reduction only makes sense when the GPU is kept powered on between jobs */ - if (pm_runtime_get_if_in_use(pfdev->dev)) { + if (pm_runtime_get_if_in_use(pfdev->base.dev)) { flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID); - pm_runtime_put(pfdev->dev); + pm_runtime_put(pfdev->base.dev); return flush_id; } } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 
82acabb21b27..11894a6b9fcc 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -22,11 +22,16 @@ #include "panfrost_mmu.h" #include "panfrost_dump.h" +#define MAX_JM_CTX_PER_FILE 64 #define JOB_TIMEOUT_MS 500 #define job_write(dev, reg, data) writel(data, dev->iomem + (reg)) #define job_read(dev, reg) readl(dev->iomem + (reg)) +const char * const panfrost_engine_names[] = { + "fragment", "vertex-tiler", "compute-only" +}; + struct panfrost_queue_state { struct drm_gpu_scheduler sched; u64 fence_context; @@ -94,7 +99,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in if (!fence) return ERR_PTR(-ENOMEM); - fence->dev = pfdev->ddev; + fence->dev = &pfdev->base; fence->queue = js_num; fence->seqno = ++js->queue[js_num].emit_seqno; dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock, @@ -195,7 +200,7 @@ panfrost_enqueue_job(struct panfrost_device *pfdev, int slot, return 1; } -static void panfrost_job_hw_submit(struct panfrost_job *job, int js) +static int panfrost_job_hw_submit(struct panfrost_job *job, int js) { struct panfrost_device *pfdev = job->pfdev; unsigned int subslot; @@ -203,17 +208,22 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) u64 jc_head = job->jc; int ret; - panfrost_devfreq_record_busy(&pfdev->pfdevfreq); - - ret = pm_runtime_get_sync(pfdev->dev); + ret = pm_runtime_get_sync(pfdev->base.dev); if (ret < 0) - return; + goto err_hwsubmit; if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) { - return; + ret = -EINVAL; + goto err_hwsubmit; } - cfg = panfrost_mmu_as_get(pfdev, job->mmu); + ret = panfrost_mmu_as_get(pfdev, job->mmu); + if (ret < 0) + goto err_hwsubmit; + + cfg = ret; + + panfrost_devfreq_record_busy(&pfdev->pfdevfreq); job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head)); job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head)); @@ -256,11 +266,17 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) } job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START); - dev_dbg(pfdev->dev, + dev_dbg(pfdev->base.dev, "JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d", job, js, subslot, jc_head, cfg & 0xf); } spin_unlock(&pfdev->js->job_lock); + + return 0; + +err_hwsubmit: + pm_runtime_put_autosuspend(pfdev->base.dev); + return ret; } static int panfrost_acquire_object_fences(struct drm_gem_object **bos, @@ -359,6 +375,7 @@ static void panfrost_job_cleanup(struct kref *ref) kvfree(job->bos); } + panfrost_jm_ctx_put(job->ctx); kfree(job); } @@ -382,6 +399,10 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) struct panfrost_device *pfdev = job->pfdev; int slot = panfrost_job_get_slot(job); struct dma_fence *fence = NULL; + int ret; + + if (job->ctx->destroyed) + return ERR_PTR(-ECANCELED); if (unlikely(job->base.s_fence->finished.error)) return NULL; @@ -400,27 +421,27 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) dma_fence_put(job->done_fence); job->done_fence = dma_fence_get(fence); - panfrost_job_hw_submit(job, slot); + ret = panfrost_job_hw_submit(job, slot); + if (ret) { + dma_fence_put(fence); + return ERR_PTR(ret); + } return fence; } -void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) +void panfrost_jm_reset_interrupts(struct panfrost_device *pfdev) { - int j; - u32 irq_mask = 0; + job_write(pfdev, JOB_INT_CLEAR, ALL_JS_INT_MASK); +} +void panfrost_jm_enable_interrupts(struct panfrost_device *pfdev) +{ clear_bit(PANFROST_COMP_BIT_JOB, 
pfdev->is_suspended); - - for (j = 0; j < NUM_JOB_SLOTS; j++) { - irq_mask |= MK_JS_MASK(j); - } - - job_write(pfdev, JOB_INT_CLEAR, irq_mask); - job_write(pfdev, JOB_INT_MASK, irq_mask); + job_write(pfdev, JOB_INT_MASK, ALL_JS_INT_MASK); } -void panfrost_job_suspend_irq(struct panfrost_device *pfdev) +void panfrost_jm_suspend_irq(struct panfrost_device *pfdev) { set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); @@ -437,12 +458,12 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, bool signal_fence = true; if (!panfrost_exception_is_fault(js_status)) { - dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x", + dev_dbg(pfdev->base.dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x", js, exception_name, job_read(pfdev, JS_HEAD_LO(js)), job_read(pfdev, JS_TAIL_LO(js))); } else { - dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", + dev_err(pfdev->base.dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", js, exception_name, job_read(pfdev, JS_HEAD_LO(js)), job_read(pfdev, JS_TAIL_LO(js))); @@ -474,7 +495,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, if (signal_fence) dma_fence_signal_locked(job->done_fence); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); if (panfrost_exception_needs_reset(pfdev, js_status)) { atomic_set(&pfdev->reset.pending, 1); @@ -482,8 +503,8 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, } } -static void panfrost_job_handle_done(struct panfrost_device *pfdev, - struct panfrost_job *job) +static void panfrost_jm_handle_done(struct panfrost_device *pfdev, + struct panfrost_job *job) { /* Set ->jc to 0 to avoid re-submitting an already finished job (can * happen when we receive the DONE interrupt while doing a GPU reset). 
@@ -493,10 +514,10 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev, panfrost_devfreq_record_idle(&pfdev->pfdevfreq); dma_fence_signal_locked(job->done_fence); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); } -static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) +static void panfrost_jm_handle_irq(struct panfrost_device *pfdev, u32 status) { struct panfrost_job *done[NUM_JOB_SLOTS][2] = {}; struct panfrost_job *failed[NUM_JOB_SLOTS] = {}; @@ -571,7 +592,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) } for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++) - panfrost_job_handle_done(pfdev, done[j][i]); + panfrost_jm_handle_done(pfdev, done[j][i]); } /* And finally we requeue jobs that were waiting in the second slot @@ -589,7 +610,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j); dma_fence_set_error(canceled->done_fence, -ECANCELED); - panfrost_job_handle_done(pfdev, canceled); + panfrost_jm_handle_done(pfdev, canceled); } else if (!atomic_read(&pfdev->reset.pending)) { /* Requeue the job we removed if no reset is pending */ job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START); @@ -597,15 +618,15 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) } } -static void panfrost_job_handle_irqs(struct panfrost_device *pfdev) +static void panfrost_jm_handle_irqs(struct panfrost_device *pfdev) { u32 status = job_read(pfdev, JOB_INT_RAWSTAT); while (status) { - pm_runtime_mark_last_busy(pfdev->dev); + pm_runtime_mark_last_busy(pfdev->base.dev); spin_lock(&pfdev->js->job_lock); - panfrost_job_handle_irq(pfdev, status); + panfrost_jm_handle_irq(pfdev, status); spin_unlock(&pfdev->js->job_lock); status = job_read(pfdev, JOB_INT_RAWSTAT); } @@ -683,10 +704,10 @@ panfrost_reset(struct panfrost_device *pfdev, 10, 10000); if (ret) - dev_err(pfdev->dev, "Soft-stop failed\n"); + dev_err(pfdev->base.dev, "Soft-stop failed\n"); /* Handle the remaining interrupts before we reset. */ - panfrost_job_handle_irqs(pfdev); + panfrost_jm_handle_irqs(pfdev); /* Remaining interrupts have been handled, but we might still have * stuck jobs. Let's make sure the PM counters stay balanced by @@ -701,7 +722,7 @@ panfrost_reset(struct panfrost_device *pfdev, if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT || pfdev->jobs[i][j]->is_profiled) panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev); - pm_runtime_put_noidle(pfdev->dev); + pm_runtime_put_noidle(pfdev->base.dev); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); } } @@ -709,12 +730,7 @@ panfrost_reset(struct panfrost_device *pfdev, spin_unlock(&pfdev->js->job_lock); /* Proceed with reset now. */ - panfrost_device_reset(pfdev); - - /* panfrost_device_reset() unmasks job interrupts, but we want to - * keep them masked a bit longer. - */ - job_write(pfdev, JOB_INT_MASK, 0); + panfrost_device_reset(pfdev, false); /* GPU has been reset, we can clear the reset pending bit. */ atomic_set(&pfdev->reset.pending, 0); @@ -736,9 +752,7 @@ panfrost_reset(struct panfrost_device *pfdev, drm_sched_start(&pfdev->js->queue[i].sched, 0); /* Re-enable job interrupts now that everything has been restarted. 
*/ - job_write(pfdev, JOB_INT_MASK, - GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | - GENMASK(NUM_JOB_SLOTS - 1, 0)); + panfrost_jm_enable_interrupts(pfdev); dma_fence_end_signalling(cookie); } @@ -769,11 +783,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job synchronize_irq(pfdev->js->irq); if (dma_fence_is_signaled(job->done_fence)) { - dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n"); + dev_warn(pfdev->base.dev, "unexpectedly high interrupt latency\n"); return DRM_GPU_SCHED_STAT_NO_HANG; } - dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", + dev_err(pfdev->base.dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", js, job_read(pfdev, JS_CONFIG(js)), job_read(pfdev, JS_STATUS(js)), @@ -803,22 +817,20 @@ static const struct drm_sched_backend_ops panfrost_sched_ops = { .free_job = panfrost_job_free }; -static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data) +static irqreturn_t panfrost_jm_irq_handler_thread(int irq, void *data) { struct panfrost_device *pfdev = data; - panfrost_job_handle_irqs(pfdev); + panfrost_jm_handle_irqs(pfdev); /* Enable interrupts only if we're not about to get suspended */ if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended)) - job_write(pfdev, JOB_INT_MASK, - GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | - GENMASK(NUM_JOB_SLOTS - 1, 0)); + job_write(pfdev, JOB_INT_MASK, ALL_JS_INT_MASK); return IRQ_HANDLED; } -static irqreturn_t panfrost_job_irq_handler(int irq, void *data) +static irqreturn_t panfrost_jm_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; u32 status; @@ -834,19 +846,20 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) return IRQ_WAKE_THREAD; } -int panfrost_job_init(struct panfrost_device *pfdev) +int panfrost_jm_init(struct panfrost_device *pfdev) { struct drm_sched_init_args args = { .ops = &panfrost_sched_ops, .num_rqs = DRM_SCHED_PRIORITY_COUNT, .credit_limit = 2, .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), - .name = "pan_js", - .dev = pfdev->dev, + .dev = pfdev->base.dev, }; struct panfrost_job_slot *js; int ret, j; + BUILD_BUG_ON(ARRAY_SIZE(panfrost_engine_names) != NUM_JOB_SLOTS); + /* All GPUs have two entries per queue, but without jobchain * disambiguation stopping the right job in the close path is tricky, * so let's just advertise one entry in that case. 
@@ -854,24 +867,25 @@ int panfrost_job_init(struct panfrost_device *pfdev) if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) args.credit_limit = 1; - pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL); + js = devm_kzalloc(pfdev->base.dev, sizeof(*js), GFP_KERNEL); if (!js) return -ENOMEM; + pfdev->js = js; INIT_WORK(&pfdev->reset.work, panfrost_reset_work); spin_lock_init(&js->job_lock); - js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job"); + js->irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "job"); if (js->irq < 0) return js->irq; - ret = devm_request_threaded_irq(pfdev->dev, js->irq, - panfrost_job_irq_handler, - panfrost_job_irq_handler_thread, + ret = devm_request_threaded_irq(pfdev->base.dev, js->irq, + panfrost_jm_irq_handler, + panfrost_jm_irq_handler_thread, IRQF_SHARED, KBUILD_MODNAME "-job", pfdev); if (ret) { - dev_err(pfdev->dev, "failed to request job irq"); + dev_err(pfdev->base.dev, "failed to request job irq"); return ret; } @@ -882,15 +896,17 @@ int panfrost_job_init(struct panfrost_device *pfdev) for (j = 0; j < NUM_JOB_SLOTS; j++) { js->queue[j].fence_context = dma_fence_context_alloc(1); + args.name = panfrost_engine_names[j]; ret = drm_sched_init(&js->queue[j].sched, &args); if (ret) { - dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); + dev_err(pfdev->base.dev, "Failed to create scheduler: %d.", ret); goto err_sched; } } - panfrost_job_enable_interrupts(pfdev); + panfrost_jm_reset_interrupts(pfdev); + panfrost_jm_enable_interrupts(pfdev); return 0; @@ -902,7 +918,7 @@ err_sched: return ret; } -void panfrost_job_fini(struct panfrost_device *pfdev) +void panfrost_jm_fini(struct panfrost_device *pfdev) { struct panfrost_job_slot *js = pfdev->js; int j; @@ -917,39 +933,176 @@ void panfrost_job_fini(struct panfrost_device *pfdev) destroy_workqueue(pfdev->reset.wq); } -int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) +int panfrost_jm_open(struct drm_file *file) +{ + struct panfrost_file_priv *panfrost_priv = file->driver_priv; + int ret; + + struct drm_panfrost_jm_ctx_create default_jm_ctx = { + .priority = PANFROST_JM_CTX_PRIORITY_MEDIUM, + }; + + xa_init_flags(&panfrost_priv->jm_ctxs, XA_FLAGS_ALLOC); + + ret = panfrost_jm_ctx_create(file, &default_jm_ctx); + if (ret) + return ret; + + /* We expect the default context to be assigned handle 0. 
*/ + if (WARN_ON(default_jm_ctx.handle)) + return -EINVAL; + + return 0; +} + +void panfrost_jm_close(struct drm_file *file) +{ + struct panfrost_file_priv *panfrost_priv = file->driver_priv; + struct panfrost_jm_ctx *jm_ctx; + unsigned long i; + + xa_for_each(&panfrost_priv->jm_ctxs, i, jm_ctx) + panfrost_jm_ctx_destroy(file, i); + + xa_destroy(&panfrost_priv->jm_ctxs); +} + +int panfrost_jm_is_idle(struct panfrost_device *pfdev) { - struct panfrost_device *pfdev = panfrost_priv->pfdev; struct panfrost_job_slot *js = pfdev->js; - struct drm_gpu_scheduler *sched; - int ret, i; + int i; for (i = 0; i < NUM_JOB_SLOTS; i++) { - sched = &js->queue[i].sched; - ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], - DRM_SCHED_PRIORITY_NORMAL, &sched, - 1, NULL); - if (WARN_ON(ret)) - return ret; + /* If there are any jobs in the HW queue, we're not idle */ + if (atomic_read(&js->queue[i].sched.credit_count)) + return false; + } + + return true; +} + +static void panfrost_jm_ctx_release(struct kref *kref) +{ + struct panfrost_jm_ctx *jm_ctx = container_of(kref, struct panfrost_jm_ctx, refcnt); + + WARN_ON(!jm_ctx->destroyed); + + for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) + drm_sched_entity_destroy(&jm_ctx->slot_entity[i]); + + kfree(jm_ctx); +} + +void +panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx) +{ + if (jm_ctx) + kref_put(&jm_ctx->refcnt, panfrost_jm_ctx_release); +} + +struct panfrost_jm_ctx * +panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx) +{ + if (jm_ctx) + kref_get(&jm_ctx->refcnt); + + return jm_ctx; +} + +struct panfrost_jm_ctx * +panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle) +{ + struct panfrost_file_priv *priv = file->driver_priv; + struct panfrost_jm_ctx *jm_ctx; + + xa_lock(&priv->jm_ctxs); + jm_ctx = panfrost_jm_ctx_get(xa_load(&priv->jm_ctxs, handle)); + xa_unlock(&priv->jm_ctxs); + + return jm_ctx; +} + +static int jm_ctx_prio_to_drm_sched_prio(struct drm_file *file, + enum drm_panfrost_jm_ctx_priority in, + enum drm_sched_priority *out) +{ + switch (in) { + case PANFROST_JM_CTX_PRIORITY_LOW: + *out = DRM_SCHED_PRIORITY_LOW; + return 0; + case PANFROST_JM_CTX_PRIORITY_MEDIUM: + *out = DRM_SCHED_PRIORITY_NORMAL; + return 0; + case PANFROST_JM_CTX_PRIORITY_HIGH: + if (!panfrost_high_prio_allowed(file)) + return -EACCES; + + *out = DRM_SCHED_PRIORITY_HIGH; + return 0; + default: + return -EINVAL; + } +} + +int panfrost_jm_ctx_create(struct drm_file *file, + struct drm_panfrost_jm_ctx_create *args) +{ + struct panfrost_file_priv *priv = file->driver_priv; + struct panfrost_device *pfdev = priv->pfdev; + enum drm_sched_priority sched_prio; + struct panfrost_jm_ctx *jm_ctx; + int ret; + + jm_ctx = kzalloc(sizeof(*jm_ctx), GFP_KERNEL); + if (!jm_ctx) + return -ENOMEM; + + kref_init(&jm_ctx->refcnt); + + ret = jm_ctx_prio_to_drm_sched_prio(file, args->priority, &sched_prio); + if (ret) + goto err_put_jm_ctx; + + for (u32 i = 0; i < NUM_JOB_SLOTS; i++) { + struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; + + ret = drm_sched_entity_init(&jm_ctx->slot_entity[i], sched_prio, + &sched, 1, NULL); + if (ret) + goto err_put_jm_ctx; } + + ret = xa_alloc(&priv->jm_ctxs, &args->handle, jm_ctx, + XA_LIMIT(0, MAX_JM_CTX_PER_FILE), GFP_KERNEL); + if (ret) + goto err_put_jm_ctx; + return 0; + +err_put_jm_ctx: + jm_ctx->destroyed = true; + panfrost_jm_ctx_put(jm_ctx); + return ret; } -void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) +int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle) { - struct panfrost_device 
*pfdev = panfrost_priv->pfdev; - int i; + struct panfrost_file_priv *priv = file->driver_priv; + struct panfrost_device *pfdev = priv->pfdev; + struct panfrost_jm_ctx *jm_ctx; - for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]); + jm_ctx = xa_erase(&priv->jm_ctxs, handle); + if (!jm_ctx) + return -EINVAL; + + jm_ctx->destroyed = true; /* Kill in-flight jobs */ spin_lock(&pfdev->js->job_lock); - for (i = 0; i < NUM_JOB_SLOTS; i++) { - struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i]; - int j; + for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) { + struct drm_sched_entity *entity = &jm_ctx->slot_entity[i]; - for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) { + for (int j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) { struct panfrost_job *job = pfdev->jobs[i][j]; u32 cmd; @@ -980,18 +1133,7 @@ void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) } } spin_unlock(&pfdev->js->job_lock); -} - -int panfrost_job_is_idle(struct panfrost_device *pfdev) -{ - struct panfrost_job_slot *js = pfdev->js; - int i; - - for (i = 0; i < NUM_JOB_SLOTS; i++) { - /* If there are any jobs in the HW queue, we're not idle */ - if (atomic_read(&js->queue[i].sched.credit_count)) - return false; - } - return true; + panfrost_jm_ctx_put(jm_ctx); + return 0; } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index ec581b97852b..c3f57e41a571 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -18,6 +18,7 @@ struct panfrost_job { struct panfrost_device *pfdev; struct panfrost_mmu *mmu; + struct panfrost_jm_ctx *ctx; /* Fence to be signaled by IRQ handler when the job is complete. */ struct dma_fence *done_fence; @@ -39,15 +40,38 @@ struct panfrost_job { u64 start_cycles; }; -int panfrost_job_init(struct panfrost_device *pfdev); -void panfrost_job_fini(struct panfrost_device *pfdev); -int panfrost_job_open(struct panfrost_file_priv *panfrost_priv); -void panfrost_job_close(struct panfrost_file_priv *panfrost_priv); +struct panfrost_js_ctx { + struct drm_sched_entity sched_entity; + bool enabled; +}; + +#define NUM_JOB_SLOTS 3 + +struct panfrost_jm_ctx { + struct kref refcnt; + bool destroyed; + struct drm_sched_entity slot_entity[NUM_JOB_SLOTS]; +}; + +extern const char * const panfrost_engine_names[]; + +int panfrost_jm_ctx_create(struct drm_file *file, + struct drm_panfrost_jm_ctx_create *args); +int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle); +void panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx); +struct panfrost_jm_ctx *panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx); +struct panfrost_jm_ctx *panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle); + +int panfrost_jm_init(struct panfrost_device *pfdev); +void panfrost_jm_fini(struct panfrost_device *pfdev); +int panfrost_jm_open(struct drm_file *file); +void panfrost_jm_close(struct drm_file *file); +void panfrost_jm_reset_interrupts(struct panfrost_device *pfdev); +void panfrost_jm_enable_interrupts(struct panfrost_device *pfdev); +void panfrost_jm_suspend_irq(struct panfrost_device *pfdev); +int panfrost_jm_is_idle(struct panfrost_device *pfdev); int panfrost_job_get_slot(struct panfrost_job *job); int panfrost_job_push(struct panfrost_job *job); void panfrost_job_put(struct panfrost_job *job); -void panfrost_job_enable_interrupts(struct panfrost_device *pfdev); -void panfrost_job_suspend_irq(struct panfrost_device *pfdev); -int 
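The ioctl additions above introduce per-file job-manager contexts: each drm_file now owns an xarray of kref-counted panfrost_jm_ctx objects, handle 0 is the default context created at open time, JM_CTX_CREATE allocates a handle with a requested scheduling priority (HIGH requiring CAP_SYS_NICE or DRM master), and SUBMIT selects a context through the new jm_ctx_handle field. A hedged userspace-side sketch of how the three ioctls compose; the DRM_IOCTL_PANFROST_JM_CTX_* macro names are inferred from the ioctl table naming and are not quoted from the uAPI header:

#include <xf86drm.h>
#include <drm/panfrost_drm.h>

static int submit_in_high_prio_ctx(int fd, __u64 jc)
{
	struct drm_panfrost_jm_ctx_create ctx = {
		/* HIGH needs CAP_SYS_NICE or DRM master per the patch above */
		.priority = PANFROST_JM_CTX_PRIORITY_HIGH,
	};
	struct drm_panfrost_jm_ctx_destroy dtor = { 0 };
	struct drm_panfrost_submit submit = { 0 };
	int ret;

	ret = drmIoctl(fd, DRM_IOCTL_PANFROST_JM_CTX_CREATE, &ctx);
	if (ret)
		return ret;

	submit.jc = jc;
	submit.jm_ctx_handle = ctx.handle;	/* 0 would pick the default context */
	ret = drmIoctl(fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);

	dtor.handle = ctx.handle;
	drmIoctl(fd, DRM_IOCTL_PANFROST_JM_CTX_DESTROY, &dtor);
	return ret;
}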
panfrost_job_is_idle(struct panfrost_device *pfdev); #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index f6b91c052cfb..8f3b7a7b6ad0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -2,6 +2,7 @@ /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */ #include <drm/panfrost_drm.h> +#include <drm/drm_print.h> #include <linux/atomic.h> #include <linux/bitfield.h> @@ -81,7 +82,7 @@ static int wait_ready(struct panfrost_device *pfdev, u32 as_nr) if (ret) { /* The GPU hung, let's trigger a reset */ panfrost_device_schedule_reset(pfdev); - dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n"); + dev_err(pfdev->base.dev, "AS_ACTIVE bit stuck\n"); } return ret; @@ -222,7 +223,7 @@ static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu) struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg; struct panfrost_device *pfdev = mmu->pfdev; - if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr & + if (drm_WARN_ON(&pfdev->base, pgtbl_cfg->arm_lpae_s1_cfg.ttbr & ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK)) return -EINVAL; @@ -253,12 +254,12 @@ static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu, return mmu_cfg_init_mali_lpae(mmu); default: /* This should never happen */ - drm_WARN(pfdev->ddev, 1, "Invalid pgtable format"); + drm_WARN(&pfdev->base, 1, "Invalid pgtable format"); return -EINVAL; } } -u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) +int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) { int as; @@ -300,7 +301,10 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) if (!atomic_read(&lru_mmu->as_count)) break; } - WARN_ON(&lru_mmu->list == &pfdev->as_lru_list); + if (WARN_ON(&lru_mmu->list == &pfdev->as_lru_list)) { + as = -EBUSY; + goto out; + } list_del_init(&lru_mmu->list); as = lru_mmu->as; @@ -315,7 +319,9 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) atomic_set(&mmu->as_count, 1); list_add(&mmu->list, &pfdev->as_lru_list); - dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask); + dev_dbg(pfdev->base.dev, + "Assigned AS%d to mmu %p, alloc_mask=%lx", + as, mmu, pfdev->as_alloc_mask); panfrost_mmu_enable(pfdev, mmu); @@ -381,13 +387,30 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev, if (mmu->as < 0) return; - pm_runtime_get_noresume(pfdev->dev); + pm_runtime_get_noresume(pfdev->base.dev); /* Flush the PTs only if we're already awake */ - if (pm_runtime_active(pfdev->dev)) + if (pm_runtime_active(pfdev->base.dev)) mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); +} + +static void mmu_unmap_range(struct panfrost_mmu *mmu, u64 iova, size_t len) +{ + struct io_pgtable_ops *ops = mmu->pgtbl_ops; + size_t pgsize, unmapped_len = 0; + size_t unmapped_page, pgcount; + + while (unmapped_len < len) { + pgsize = get_pgsize(iova, len - unmapped_len, &pgcount); + + unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL); + WARN_ON(unmapped_page != pgsize * pgcount); + + iova += pgsize * pgcount; + unmapped_len += pgsize * pgcount; + } } static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, @@ -396,22 +419,30 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, unsigned int count; struct scatterlist *sgl; struct io_pgtable_ops *ops = 
mmu->pgtbl_ops; + size_t total_mapped = 0; u64 start_iova = iova; + int ret; for_each_sgtable_dma_sg(sgt, sgl, count) { unsigned long paddr = sg_dma_address(sgl); size_t len = sg_dma_len(sgl); - dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len); + dev_dbg(pfdev->base.dev, + "map: as=%d, iova=%llx, paddr=%lx, len=%zx", + mmu->as, iova, paddr, len); while (len) { size_t pgcount, mapped = 0; size_t pgsize = get_pgsize(iova | paddr, len, &pgcount); - ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, + ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_KERNEL, &mapped); + if (ret) + goto err_unmap_pages; + /* Don't get stuck if things have gone wrong */ mapped = max(mapped, pgsize); + total_mapped += mapped; iova += mapped; paddr += mapped; len -= mapped; @@ -421,6 +452,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova); return 0; + +err_unmap_pages: + mmu_unmap_range(mmu, start_iova, total_mapped); + return ret; } int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) @@ -431,6 +466,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE; + int ret; if (WARN_ON(mapping->active)) return 0; @@ -442,11 +478,18 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); - mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, - prot, sgt); + ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, + prot, sgt); + if (ret) + goto err_put_pages; + mapping->active = true; return 0; + +err_put_pages: + drm_gem_shmem_put_pages_locked(shmem); + return ret; } void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) @@ -462,7 +505,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) if (WARN_ON(!mapping->active)) return; - dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", + dev_dbg(pfdev->base.dev, "unmap: as=%d, iova=%llx, len=%zx", mapping->mmu->as, iova, len); while (unmapped_len < len) { @@ -559,7 +602,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, bo = bomapping->obj; if (!bo->is_heap) { - dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", + dev_WARN(pfdev->base.dev, "matching BO is not heap type (GPU VA = %llx)", bomapping->mmnode.start << PAGE_SHIFT); ret = -EINVAL; goto err_bo; @@ -595,10 +638,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, refcount_set(&bo->base.pages_use_count, 1); } else { pages = bo->base.pages; - if (pages[page_offset]) { - /* Pages are already mapped, bail out. */ - goto out; - } + } + + sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; + if (sgt->sgl) { + /* Pages are already mapped, bail out. 
*/ + goto out; } mapping = bo->base.base.filp->f_mapping; @@ -620,23 +665,24 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, } } - sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; ret = sg_alloc_table_from_pages(sgt, pages + page_offset, NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL); if (ret) goto err_unlock; - ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0); + ret = dma_map_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0); if (ret) goto err_map; - mmu_map_sg(pfdev, bomapping->mmu, addr, - IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt); + ret = mmu_map_sg(pfdev, bomapping->mmu, addr, + IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt); + if (ret) + goto err_mmu_map_sg; bomapping->active = true; bo->heap_rss_size += SZ_2M; - dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); + dev_dbg(pfdev->base.dev, "mapped page fault @ AS%d %llx", as, addr); out: dma_resv_unlock(obj->resv); @@ -645,6 +691,8 @@ out: return 0; +err_mmu_map_sg: + dma_unmap_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0); err_map: sg_free_table(sgt); err_unlock: @@ -662,13 +710,12 @@ static void panfrost_mmu_release_ctx(struct kref *kref) spin_lock(&pfdev->as_lock); if (mmu->as >= 0) { - pm_runtime_get_noresume(pfdev->dev); - if (pm_runtime_active(pfdev->dev)) + pm_runtime_get_noresume(pfdev->base.dev); + if (pm_runtime_active(pfdev->base.dev)) panfrost_mmu_disable(pfdev, mmu->as); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); clear_bit(mmu->as, &pfdev->as_alloc_mask); - clear_bit(mmu->as, &pfdev->as_in_use_mask); list_del(&mmu->list); } spin_unlock(&pfdev->as_lock); @@ -726,7 +773,7 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev) if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) { if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) { - dev_err_once(pfdev->dev, + dev_err_once(pfdev->base.dev, "AARCH64_4K page table not supported\n"); return ERR_PTR(-EINVAL); } @@ -755,7 +802,7 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev) .oas = pa_bits, .coherent_walk = pfdev->coherent, .tlb = &mmu_tlb_ops, - .iommu_dev = pfdev->dev, + .iommu_dev = pfdev->base.dev, }; mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu); @@ -848,7 +895,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) if (ret) { /* terminal fault, print info about the fault */ - dev_err(pfdev->dev, + dev_err(pfdev->base.dev, "Unhandled Page fault in AS%d at VA 0x%016llX\n" "Reason: %s\n" "raw fault status: 0x%X\n" @@ -896,18 +943,18 @@ int panfrost_mmu_init(struct panfrost_device *pfdev) { int err; - pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); + pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "mmu"); if (pfdev->mmu_irq < 0) return pfdev->mmu_irq; - err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq, + err = devm_request_threaded_irq(pfdev->base.dev, pfdev->mmu_irq, panfrost_mmu_irq_handler, panfrost_mmu_irq_handler_thread, IRQF_SHARED, KBUILD_MODNAME "-mmu", pfdev); if (err) { - dev_err(pfdev->dev, "failed to request mmu irq"); + dev_err(pfdev->base.dev, "failed to request mmu irq"); return err; } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index 022a9a74a114..27c3c65ed074 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -4,6 +4,7 @@ #ifndef 
__PANFROST_MMU_H__ #define __PANFROST_MMU_H__ +struct panfrost_device; struct panfrost_gem_mapping; struct panfrost_file_priv; struct panfrost_mmu; @@ -16,7 +17,7 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev); void panfrost_mmu_reset(struct panfrost_device *pfdev); void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev); -u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); +int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 0dd62e8b2fa7..7020c0192e18 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -84,11 +84,11 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, else if (perfcnt->user) return -EBUSY; - ret = pm_runtime_get_sync(pfdev->dev); + ret = pm_runtime_get_sync(pfdev->base.dev); if (ret < 0) goto err_put_pm; - bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize); + bo = drm_gem_shmem_create(&pfdev->base, perfcnt->bosize); if (IS_ERR(bo)) { ret = PTR_ERR(bo); goto err_put_pm; @@ -130,9 +130,11 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, goto err_vunmap; } - perfcnt->user = user; + ret = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu); + if (ret < 0) + goto err_vunmap; - as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu); + as = ret; cfg = GPU_PERFCNT_CFG_AS(as) | GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL); @@ -164,6 +166,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, /* The BO ref is retained by the mapping. 
*/ drm_gem_object_put(&bo->base); + perfcnt->user = user; + return 0; err_vunmap: @@ -175,7 +179,7 @@ err_close_bo: err_put_bo: drm_gem_object_put(&bo->base); err_put_pm: - pm_runtime_put(pfdev->dev); + pm_runtime_put(pfdev->base.dev); return ret; } @@ -203,7 +207,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); panfrost_gem_mapping_put(perfcnt->mapping); perfcnt->mapping = NULL; - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); return 0; } @@ -211,7 +215,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_panfrost_perfcnt_enable *req = data; int ret; @@ -238,7 +242,7 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_panfrost_perfcnt_dump *req = data; void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr; @@ -273,12 +277,12 @@ void panfrost_perfcnt_close(struct drm_file *file_priv) struct panfrost_device *pfdev = pfile->pfdev; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; - pm_runtime_get_sync(pfdev->dev); + pm_runtime_get_sync(pfdev->base.dev); mutex_lock(&perfcnt->lock); if (perfcnt->user == pfile) panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); } int panfrost_perfcnt_init(struct panfrost_device *pfdev) @@ -316,7 +320,7 @@ int panfrost_perfcnt_init(struct panfrost_device *pfdev) COUNTERS_PER_BLOCK * BYTES_PER_COUNTER; } - perfcnt = devm_kzalloc(pfdev->dev, sizeof(*perfcnt), GFP_KERNEL); + perfcnt = devm_kzalloc(pfdev->base.dev, sizeof(*perfcnt), GFP_KERNEL); if (!perfcnt) return -ENOMEM; diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile index 02db21748c12..753a32c446df 100644 --- a/drivers/gpu/drm/panthor/Makefile +++ b/drivers/gpu/drm/panthor/Makefile @@ -10,6 +10,7 @@ panthor-y := \ panthor_heap.o \ panthor_hw.o \ panthor_mmu.o \ + panthor_pwr.o \ panthor_sched.o obj-$(CONFIG_DRM_PANTHOR) += panthor.o diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c index 3686515d368d..2249b41ca4af 100644 --- a/drivers/gpu/drm/panthor/panthor_devfreq.c +++ b/drivers/gpu/drm/panthor/panthor_devfreq.c @@ -8,6 +8,7 @@ #include <linux/pm_opp.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "panthor_devfreq.h" #include "panthor_device.h" @@ -62,7 +63,6 @@ static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq) static int panthor_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { - struct panthor_device *ptdev = dev_get_drvdata(dev); struct dev_pm_opp *opp; int err; @@ -72,8 +72,6 @@ static int panthor_devfreq_target(struct device *dev, unsigned long *freq, dev_pm_opp_put(opp); err = dev_pm_opp_set_rate(dev, *freq); - if (!err) - ptdev->current_frequency = *freq; return err; } @@ -115,11 +113,21 @@ static int 
panthor_devfreq_get_dev_status(struct device *dev, return 0; } +static int panthor_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct panthor_device *ptdev = dev_get_drvdata(dev); + + *freq = clk_get_rate(ptdev->clks.core); + + return 0; +} + static struct devfreq_dev_profile panthor_devfreq_profile = { .timer = DEVFREQ_TIMER_DELAYED, .polling_ms = 50, /* ~3 frames */ .target = panthor_devfreq_target, .get_dev_status = panthor_devfreq_get_dev_status, + .get_cur_freq = panthor_devfreq_get_cur_freq, }; int panthor_devfreq_init(struct panthor_device *ptdev) @@ -134,6 +142,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev) struct thermal_cooling_device *cooling; struct device *dev = ptdev->base.dev; struct panthor_devfreq *pdevfreq; + struct opp_table *table; struct dev_pm_opp *opp; unsigned long cur_freq; unsigned long freq = ULONG_MAX; @@ -145,18 +154,30 @@ int panthor_devfreq_init(struct panthor_device *ptdev) ptdev->devfreq = pdevfreq; - ret = devm_pm_opp_set_regulators(dev, reg_names); - if (ret) { - if (ret != -EPROBE_DEFER) - DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n"); - - return ret; + /* + * The power domain associated with the GPU may have already added an + * OPP table, complete with OPPs, as part of the platform bus + * initialization. If this is the case, the power domain is in charge of + * also controlling the performance, with a set_performance callback. + * Only add a new OPP table from DT if there isn't such a table present + * already. + */ + table = dev_pm_opp_get_opp_table(dev); + if (IS_ERR_OR_NULL(table)) { + ret = devm_pm_opp_set_regulators(dev, reg_names); + if (ret && ret != -ENODEV) { + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n"); + return ret; + } + + ret = devm_pm_opp_of_add_table(dev); + if (ret) + return ret; + } else { + dev_pm_opp_put_opp_table(table); } - ret = devm_pm_opp_of_add_table(dev); - if (ret) - return ret; - spin_lock_init(&pdevfreq->lock); panthor_devfreq_reset(pdevfreq); @@ -198,7 +219,6 @@ int panthor_devfreq_init(struct panthor_device *ptdev) return PTR_ERR(opp); panthor_devfreq_profile.initial_freq = cur_freq; - ptdev->current_frequency = cur_freq; /* * Set the recommend OPP this will enable and configure the regulator @@ -296,3 +316,19 @@ void panthor_devfreq_record_idle(struct panthor_device *ptdev) spin_unlock_irqrestore(&pdevfreq->lock, irqflags); } + +unsigned long panthor_devfreq_get_freq(struct panthor_device *ptdev) +{ + struct panthor_devfreq *pdevfreq = ptdev->devfreq; + unsigned long freq = 0; + int ret; + + if (!pdevfreq->devfreq) + return 0; + + ret = pdevfreq->devfreq->profile->get_cur_freq(ptdev->base.dev, &freq); + if (ret) + return 0; + + return freq; +} diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.h b/drivers/gpu/drm/panthor/panthor_devfreq.h index b7631de695f7..f8e29e02f66c 100644 --- a/drivers/gpu/drm/panthor/panthor_devfreq.h +++ b/drivers/gpu/drm/panthor/panthor_devfreq.h @@ -18,4 +18,6 @@ void panthor_devfreq_suspend(struct panthor_device *ptdev); void panthor_devfreq_record_busy(struct panthor_device *ptdev); void panthor_devfreq_record_idle(struct panthor_device *ptdev); +unsigned long panthor_devfreq_get_freq(struct panthor_device *ptdev); + #endif /* __PANTHOR_DEVFREQ_H__ */ diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index 81df49880bd8..e133b1e0ad6d 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -13,6 +13,7 @@ #include 
<drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "panthor_devfreq.h" #include "panthor_device.h" @@ -20,6 +21,7 @@ #include "panthor_gpu.h" #include "panthor_hw.h" #include "panthor_mmu.h" +#include "panthor_pwr.h" #include "panthor_regs.h" #include "panthor_sched.h" @@ -65,6 +67,16 @@ static int panthor_clk_init(struct panthor_device *ptdev) return 0; } +static int panthor_init_power(struct device *dev) +{ + struct dev_pm_domain_list *pd_list = NULL; + + if (dev->pm_domain) + return 0; + + return devm_pm_domain_attach_list(dev, NULL, &pd_list); +} + void panthor_device_unplug(struct panthor_device *ptdev) { /* This function can be called from two different path: the reset work @@ -83,6 +95,8 @@ void panthor_device_unplug(struct panthor_device *ptdev) return; } + drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0); + /* Call drm_dev_unplug() so any access to HW blocks happening after * that point get rejected. */ @@ -93,8 +107,6 @@ void panthor_device_unplug(struct panthor_device *ptdev) */ mutex_unlock(&ptdev->unplug.lock); - drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0); - /* Now, try to cleanly shutdown the GPU before the device resources * get reclaimed. */ @@ -102,6 +114,7 @@ void panthor_device_unplug(struct panthor_device *ptdev) panthor_fw_unplug(ptdev); panthor_mmu_unplug(ptdev); panthor_gpu_unplug(ptdev); + panthor_pwr_unplug(ptdev); pm_runtime_dont_use_autosuspend(ptdev->base.dev); pm_runtime_put_sync_suspend(ptdev->base.dev); @@ -120,7 +133,7 @@ static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data) { struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base); - cancel_work_sync(&ptdev->reset.work); + disable_work_sync(&ptdev->reset.work); destroy_workqueue(ptdev->reset.wq); } @@ -141,8 +154,8 @@ static void panthor_device_reset_work(struct work_struct *work) panthor_sched_pre_reset(ptdev); panthor_fw_pre_reset(ptdev, true); panthor_mmu_pre_reset(ptdev); - panthor_gpu_soft_reset(ptdev); - panthor_gpu_l2_power_on(ptdev); + panthor_hw_soft_reset(ptdev); + panthor_hw_l2_power_on(ptdev); panthor_mmu_post_reset(ptdev); ret = panthor_fw_post_reset(ptdev); atomic_set(&ptdev->reset.pending, 0); @@ -172,6 +185,8 @@ int panthor_device_init(struct panthor_device *ptdev) struct page *p; int ret; + ptdev->soc_data = of_device_get_match_data(ptdev->base.dev); + init_completion(&ptdev->unplug.done); ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock); if (ret) @@ -219,6 +234,12 @@ int panthor_device_init(struct panthor_device *ptdev) if (ret) return ret; + ret = panthor_init_power(ptdev->base.dev); + if (ret < 0) { + drm_err(&ptdev->base, "init power domains failed, ret=%d", ret); + return ret; + } + ret = panthor_devfreq_init(ptdev); if (ret) return ret; @@ -249,10 +270,14 @@ int panthor_device_init(struct panthor_device *ptdev) if (ret) goto err_rpm_put; - ret = panthor_gpu_init(ptdev); + ret = panthor_pwr_init(ptdev); if (ret) goto err_rpm_put; + ret = panthor_gpu_init(ptdev); + if (ret) + goto err_unplug_pwr; + ret = panthor_gpu_coherency_init(ptdev); if (ret) goto err_unplug_gpu; @@ -293,6 +318,9 @@ err_unplug_mmu: err_unplug_gpu: panthor_gpu_unplug(ptdev); +err_unplug_pwr: + panthor_pwr_unplug(ptdev); + err_rpm_put: pm_runtime_put_sync_suspend(ptdev->base.dev); return ret; @@ -446,6 +474,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev) { int ret; + panthor_pwr_resume(ptdev); panthor_gpu_resume(ptdev); panthor_mmu_resume(ptdev); @@ -455,6 
+484,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev) panthor_mmu_suspend(ptdev); panthor_gpu_suspend(ptdev); + panthor_pwr_suspend(ptdev); return ret; } @@ -568,6 +598,7 @@ int panthor_device_suspend(struct device *dev) panthor_fw_suspend(ptdev); panthor_mmu_suspend(ptdev); panthor_gpu_suspend(ptdev); + panthor_pwr_suspend(ptdev); drm_dev_exit(cookie); } diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h index 4fc7cf2aeed5..f35e52b9546a 100644 --- a/drivers/gpu/drm/panthor/panthor_device.h +++ b/drivers/gpu/drm/panthor/panthor_device.h @@ -24,14 +24,27 @@ struct panthor_device; struct panthor_gpu; struct panthor_group_pool; struct panthor_heap_pool; +struct panthor_hw; struct panthor_job; struct panthor_mmu; struct panthor_fw; struct panthor_perfcnt; +struct panthor_pwr; struct panthor_vm; struct panthor_vm_pool; /** + * struct panthor_soc_data - Panthor SoC Data + */ +struct panthor_soc_data { + /** @asn_hash_enable: True if GPU_L2_CONFIG_ASN_HASH_ENABLE must be set. */ + bool asn_hash_enable; + + /** @asn_hash: ASN_HASH values when asn_hash_enable is true. */ + u32 asn_hash[3]; +}; + +/** * enum panthor_device_pm_state - PM state */ enum panthor_device_pm_state { @@ -93,6 +106,9 @@ struct panthor_device { /** @base: Base drm_device. */ struct drm_device base; + /** @soc_data: Optional SoC data. */ + const struct panthor_soc_data *soc_data; + /** @phys_addr: Physical address of the iomem region. */ phys_addr_t phys_addr; @@ -120,6 +136,12 @@ struct panthor_device { /** @csif_info: Command stream interface information. */ struct drm_panthor_csif_info csif_info; + /** @hw: GPU-specific data. */ + struct panthor_hw *hw; + + /** @pwr: Power control management data. */ + struct panthor_pwr *pwr; + /** @gpu: GPU management data. */ struct panthor_gpu *gpu; @@ -200,9 +222,6 @@ struct panthor_device { /** @profile_mask: User-set profiling flags for job accounting. */ u32 profile_mask; - /** @current_frequency: Device clock frequency at present. Set by DVFS*/ - unsigned long current_frequency; - /** @fast_rate: Maximum device clock frequency. 
Set by DVFS */ unsigned long fast_rate; diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 4c202fc5ce05..d1d4c50da5bf 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -20,11 +20,13 @@ #include <drm/drm_drv.h> #include <drm/drm_exec.h> #include <drm/drm_ioctl.h> +#include <drm/drm_print.h> #include <drm/drm_syncobj.h> #include <drm/drm_utils.h> #include <drm/gpu_scheduler.h> #include <drm/panthor_drm.h> +#include "panthor_devfreq.h" #include "panthor_device.h" #include "panthor_fw.h" #include "panthor_gem.h" @@ -1105,7 +1107,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, if (ret) goto out; - ret = panthor_group_create(pfile, args, queue_args); + ret = panthor_group_create(pfile, args, queue_args, file->client_id); if (ret < 0) goto out; args->group_handle = ret; @@ -1519,7 +1521,8 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev, drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles); drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate); - drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency); + drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", + panthor_devfreq_get_freq(ptdev)); } static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file) @@ -1682,7 +1685,13 @@ static struct attribute *panthor_attrs[] = { ATTRIBUTE_GROUPS(panthor); +static const struct panthor_soc_data soc_data_mediatek_mt8196 = { + .asn_hash_enable = true, + .asn_hash = { 0xb, 0xe, 0x0, }, +}; + static const struct of_device_id dt_match[] = { + { .compatible = "mediatek,mt8196-mali", .data = &soc_data_mediatek_mt8196, }, { .compatible = "rockchip,rk3588-mali" }, { .compatible = "arm,mali-valhall-csf" }, {} diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index df767e82148a..1a5e3c1a27fb 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -16,11 +16,13 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "panthor_device.h" #include "panthor_fw.h" #include "panthor_gem.h" #include "panthor_gpu.h" +#include "panthor_hw.h" #include "panthor_mmu.h" #include "panthor_regs.h" #include "panthor_sched.h" @@ -32,6 +34,7 @@ #define PROGRESS_TIMEOUT_SCALE_SHIFT 10 #define IDLE_HYSTERESIS_US 800 #define PWROFF_HYSTERESIS_US 10000 +#define MCU_HALT_TIMEOUT_US (1ULL * USEC_PER_SEC) /** * struct panthor_fw_binary_hdr - Firmware binary header. 
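The new panthor_soc_data match data lets integrations that need a custom L2 ASN hash describe it through the OF match table, as panthor_drv.c does for mediatek,mt8196-mali above. A hypothetical sketch of how another SoC could hook in (compatible string and hash values are invented for illustration only):

static const struct panthor_soc_data soc_data_example_soc = {
	.asn_hash_enable = true,
	/* Three GPU_ASN_HASH register values from the SoC integration. */
	.asn_hash = { 0x1, 0x2, 0x3, },
};

/* Additional entry in dt_match[], ahead of the generic compatibles: */
{ .compatible = "example,example-soc-mali", .data = &soc_data_example_soc, },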
@@ -316,6 +319,49 @@ panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot) return &ptdev->fw->iface.streams[csg_slot][cs_slot]; } +static bool panthor_fw_has_glb_state(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + + return glb_iface->control->version >= CSF_IFACE_VERSION(4, 1, 0); +} + +static bool panthor_fw_has_64bit_ep_req(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + + return glb_iface->control->version >= CSF_IFACE_VERSION(4, 0, 0); +} + +u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface) +{ + if (panthor_fw_has_64bit_ep_req(ptdev)) + return csg_iface->input->endpoint_req2; + else + return csg_iface->input->endpoint_req; +} + +void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface, u64 value) +{ + if (panthor_fw_has_64bit_ep_req(ptdev)) + csg_iface->input->endpoint_req2 = value; + else + csg_iface->input->endpoint_req = lower_32_bits(value); +} + +void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface, u64 value, + u64 mask) +{ + if (panthor_fw_has_64bit_ep_req(ptdev)) + panthor_fw_update_reqs64(csg_iface, endpoint_req2, value, mask); + else + panthor_fw_update_reqs(csg_iface, endpoint_req, lower_32_bits(value), + lower_32_bits(mask)); +} + /** * panthor_fw_conv_timeout() - Convert a timeout into a cycle-count * @ptdev: Device. @@ -995,6 +1041,9 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev) GLB_IDLE_EN | GLB_IDLE; + if (panthor_fw_has_glb_state(ptdev)) + glb_iface->input->ack_irq_mask |= GLB_STATE_MASK; + panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN); panthor_fw_toggle_reqs(glb_iface, req, ack, GLB_CFG_ALLOC_EN | @@ -1068,6 +1117,54 @@ static void panthor_fw_stop(struct panthor_device *ptdev) drm_err(&ptdev->base, "Failed to stop MCU"); } +static bool panthor_fw_mcu_halted(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + bool halted; + + halted = gpu_read(ptdev, MCU_STATUS) == MCU_STATUS_HALT; + + if (panthor_fw_has_glb_state(ptdev)) + halted &= (GLB_STATE_GET(glb_iface->output->ack) == GLB_STATE_HALT); + + return halted; +} + +static void panthor_fw_halt_mcu(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + + if (panthor_fw_has_glb_state(ptdev)) + panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_HALT), GLB_STATE_MASK); + else + panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT); + + gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1); +} + +static bool panthor_fw_wait_mcu_halted(struct panthor_device *ptdev) +{ + bool halted = false; + + if (read_poll_timeout_atomic(panthor_fw_mcu_halted, halted, halted, 10, + MCU_HALT_TIMEOUT_US, 0, ptdev)) { + drm_warn(&ptdev->base, "Timed out waiting for MCU to halt"); + return false; + } + + return true; +} + +static void panthor_fw_mcu_set_active(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + + if (panthor_fw_has_glb_state(ptdev)) + panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_ACTIVE), GLB_STATE_MASK); + else + panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT); +} + /** * panthor_fw_pre_reset() - Call before a reset. * @ptdev: Device. 
@@ -1084,19 +1181,13 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang) ptdev->reset.fast = false; if (!on_hang) { - struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); - u32 status; - - panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT); - gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1); - if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status, - status == MCU_STATUS_HALT, 10, - 100000)) { - ptdev->reset.fast = true; - } else { + panthor_fw_halt_mcu(ptdev); + if (!panthor_fw_wait_mcu_halted(ptdev)) drm_warn(&ptdev->base, "Failed to cleanly suspend MCU"); - } + else + ptdev->reset.fast = true; } + panthor_fw_stop(ptdev); panthor_job_irq_suspend(&ptdev->fw->irq); panthor_fw_stop(ptdev); @@ -1125,14 +1216,14 @@ int panthor_fw_post_reset(struct panthor_device *ptdev) */ panthor_reload_fw_sections(ptdev, true); } else { - /* The FW detects 0 -> 1 transitions. Make sure we reset - * the HALT bit before the FW is rebooted. + /* + * If the FW was previously successfully halted in the pre-reset + * operation, we need to transition it to active again before + * the FW is rebooted. * This is not needed on a slow reset because FW sections are * re-initialized. */ - struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); - - panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT); + panthor_fw_mcu_set_active(ptdev); } ret = panthor_fw_start(ptdev); @@ -1163,13 +1254,17 @@ void panthor_fw_unplug(struct panthor_device *ptdev) { struct panthor_fw_section *section; - cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work); + disable_delayed_work_sync(&ptdev->fw->watchdog.ping_work); if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) { /* Make sure the IRQ handler cannot be called after that point. 
*/ if (ptdev->fw->irq.irq) panthor_job_irq_suspend(&ptdev->fw->irq); + panthor_fw_halt_mcu(ptdev); + if (!panthor_fw_wait_mcu_halted(ptdev)) + drm_warn(&ptdev->base, "Failed to halt MCU on unplug"); + panthor_fw_stop(ptdev); } @@ -1185,7 +1280,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev) ptdev->fw->vm = NULL; if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) - panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000); + panthor_hw_l2_power_off(ptdev); } /** @@ -1364,7 +1459,7 @@ int panthor_fw_init(struct panthor_device *ptdev) return ret; } - ret = panthor_gpu_l2_power_on(ptdev); + ret = panthor_hw_l2_power_on(ptdev); if (ret) return ret; @@ -1408,3 +1503,4 @@ MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin"); MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin"); MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin"); MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch14.8/mali_csffw.bin"); diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h index 6598d96c6d2a..fbdc21469ba3 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.h +++ b/drivers/gpu/drm/panthor/panthor_fw.h @@ -167,10 +167,11 @@ struct panthor_fw_csg_input_iface { #define CSG_EP_REQ_TILER(x) (((x) << 16) & GENMASK(19, 16)) #define CSG_EP_REQ_EXCL_COMPUTE BIT(20) #define CSG_EP_REQ_EXCL_FRAGMENT BIT(21) -#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & GENMASK(31, 28)) #define CSG_EP_REQ_PRIORITY_MASK GENMASK(31, 28) +#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & CSG_EP_REQ_PRIORITY_MASK) +#define CSG_EP_REQ_PRIORITY_GET(x) (((x) & CSG_EP_REQ_PRIORITY_MASK) >> 28) u32 endpoint_req; - u32 reserved2[2]; + u64 endpoint_req2; u64 suspend_buf; u64 protm_suspend_buf; u32 config; @@ -214,6 +215,13 @@ struct panthor_fw_global_input_iface { #define GLB_FWCFG_UPDATE BIT(9) #define GLB_IDLE_EN BIT(10) #define GLB_SLEEP BIT(12) +#define GLB_STATE_MASK GENMASK(14, 12) +#define GLB_STATE_ACTIVE 0 +#define GLB_STATE_HALT 1 +#define GLB_STATE_SLEEP 2 +#define GLB_STATE_SUSPEND 3 +#define GLB_STATE(x) (((x) << 12) & GLB_STATE_MASK) +#define GLB_STATE_GET(x) (((x) & GLB_STATE_MASK) >> 12) #define GLB_INACTIVE_COMPUTE BIT(20) #define GLB_INACTIVE_FRAGMENT BIT(21) #define GLB_INACTIVE_TILER BIT(22) @@ -457,6 +465,16 @@ struct panthor_fw_global_iface { spin_unlock(&(__iface)->lock); \ } while (0) +#define panthor_fw_update_reqs64(__iface, __in_reg, __val, __mask) \ + do { \ + u64 __cur_val, __new_val; \ + spin_lock(&(__iface)->lock); \ + __cur_val = READ_ONCE((__iface)->input->__in_reg); \ + __new_val = (__cur_val & ~(__mask)) | ((__val) & (__mask)); \ + WRITE_ONCE((__iface)->input->__in_reg, __new_val); \ + spin_unlock(&(__iface)->lock); \ + } while (0) + struct panthor_fw_global_iface * panthor_fw_get_glb_iface(struct panthor_device *ptdev); @@ -466,6 +484,16 @@ panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot); struct panthor_fw_cs_iface * panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot); +u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface); + +void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface, u64 value); + +void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface, u64 value, + u64 mask); + int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask, u32 *acked, u32 timeout_ms); diff --git 
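The panthor_fw_csg_endpoint_req_*() helpers declared above hide whether the firmware exposes the 32-bit endpoint_req or the 64-bit endpoint_req2 layout (selected by the CSF interface version). Their callers live in panthor_sched.c, which is not part of this excerpt; under that assumption, the minimal sketch below shows how a scheduler-side caller can update just the CSG priority field without caring about the register width (the example_ name is hypothetical):

static void example_csg_set_priority(struct panthor_device *ptdev,
				     struct panthor_fw_csg_iface *csg_iface,
				     u8 priority)
{
	/* Read-modify-write of the priority bits only; the helper picks
	 * endpoint_req or endpoint_req2 based on the interface version. */
	panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
					   CSG_EP_REQ_PRIORITY(priority),
					   CSG_EP_REQ_PRIORITY_MASK);
}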
a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index 156c7a0b62a2..fbde78db270a 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -8,6 +8,7 @@ #include <linux/err.h> #include <linux/slab.h> +#include <drm/drm_print.h> #include <drm/panthor_drm.h> #include "panthor_device.h" @@ -86,7 +87,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj) void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo) { struct panthor_vm *vm; - int ret; if (IS_ERR_OR_NULL(bo)) return; @@ -94,18 +94,11 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo) vm = bo->vm; panthor_kernel_bo_vunmap(bo); - if (drm_WARN_ON(bo->obj->dev, - to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm))) - goto out_free_bo; - - ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size); - if (ret) - goto out_free_bo; - + drm_WARN_ON(bo->obj->dev, + to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)); + panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size); panthor_vm_free_va(vm, &bo->va_node); drm_gem_object_put(bo->obj); - -out_free_bo: panthor_vm_put(vm); kfree(bo); } @@ -152,6 +145,9 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, bo = to_panthor_bo(&obj->base); kbo->obj = &obj->base; bo->flags = bo_flags; + bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm); + drm_gem_object_get(bo->exclusive_vm_root_gem); + bo->base.base.resv = bo->exclusive_vm_root_gem->resv; if (vm == panthor_fw_vm(ptdev)) debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED; @@ -175,9 +171,6 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, goto err_free_va; kbo->vm = panthor_vm_get(vm); - bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm); - drm_gem_object_get(bo->exclusive_vm_root_gem); - bo->base.base.resv = bo->exclusive_vm_root_gem->resv; return kbo; err_free_va: @@ -288,6 +281,23 @@ panthor_gem_create_with_handle(struct drm_file *file, panthor_gem_debugfs_set_usage_flags(bo, 0); + /* If this is a write-combine mapping, we query the sgt to force a CPU + * cache flush (dma_map_sgtable() is called when the sgt is created). + * This ensures the zero-ing is visible to any uncached mapping created + * by vmap/mmap. + * FIXME: Ideally this should be done when pages are allocated, not at + * BO creation time. + */ + if (shmem->map_wc) { + struct sg_table *sgt; + + sgt = drm_gem_shmem_get_pages_sgt(shmem); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto out_put_gem; + } + } + /* * Allocate an id of idr table where the obj is registered * and handle has the id what user can see. @@ -296,6 +306,7 @@ panthor_gem_create_with_handle(struct drm_file *file, if (!ret) *size = bo->base.base.size; +out_put_gem: /* drop reference from allocate - handle holds it now. */ drm_gem_object_put(&shmem->base); diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index db69449a5be0..06b231b2460a 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -15,9 +15,11 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "panthor_device.h" #include "panthor_gpu.h" +#include "panthor_hw.h" #include "panthor_regs.h" /** @@ -52,6 +54,28 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev) ptdev->coherent ? 
GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE); } +static void panthor_gpu_l2_config_set(struct panthor_device *ptdev) +{ + const struct panthor_soc_data *data = ptdev->soc_data; + u32 l2_config; + u32 i; + + if (!data || !data->asn_hash_enable) + return; + + if (GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) < 11) { + drm_err(&ptdev->base, "Custom ASN hash not supported by the device"); + return; + } + + for (i = 0; i < ARRAY_SIZE(data->asn_hash); i++) + gpu_write(ptdev, GPU_ASN_HASH(i), data->asn_hash[i]); + + l2_config = gpu_read(ptdev, GPU_L2_CONFIG); + l2_config |= GPU_L2_CONFIG_ASN_HASH_ENABLE; + gpu_write(ptdev, GPU_L2_CONFIG, l2_config); +} + static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status) { gpu_write(ptdev, GPU_INT_CLEAR, status); @@ -218,6 +242,11 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev, return 0; } +void panthor_gpu_l2_power_off(struct panthor_device *ptdev) +{ + panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000); +} + /** * panthor_gpu_l2_power_on() - Power-on the L2-cache * @ptdev: Device. @@ -241,8 +270,9 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev) hweight64(ptdev->gpu_info.shader_present)); } - /* Set the desired coherency mode before the power up of L2 */ + /* Set the desired coherency mode and L2 config before the power up of L2 */ panthor_gpu_coherency_set(ptdev); + panthor_gpu_l2_config_set(ptdev); return panthor_gpu_power_on(ptdev, L2, 1, 20000); } @@ -344,9 +374,9 @@ void panthor_gpu_suspend(struct panthor_device *ptdev) { /* On a fast reset, simply power down the L2. */ if (!ptdev->reset.fast) - panthor_gpu_soft_reset(ptdev); + panthor_hw_soft_reset(ptdev); else - panthor_gpu_power_off(ptdev, L2, 1, 20000); + panthor_hw_l2_power_off(ptdev); panthor_gpu_irq_suspend(&ptdev->gpu->irq); } @@ -361,6 +391,6 @@ void panthor_gpu_suspend(struct panthor_device *ptdev) void panthor_gpu_resume(struct panthor_device *ptdev) { panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK); - panthor_gpu_l2_power_on(ptdev); + panthor_hw_l2_power_on(ptdev); } diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h index 7c17a8c06858..12e66f48ced1 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.h +++ b/drivers/gpu/drm/panthor/panthor_gpu.h @@ -46,6 +46,7 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev, type ## _PWRTRANS, \ mask, timeout_us) +void panthor_gpu_l2_power_off(struct panthor_device *ptdev); int panthor_gpu_l2_power_on(struct panthor_device *ptdev); int panthor_gpu_flush_caches(struct panthor_device *ptdev, u32 l2, u32 lsc, u32 other); diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c index d236e9ceade4..0b6ff4c0a11b 100644 --- a/drivers/gpu/drm/panthor/panthor_heap.c +++ b/drivers/gpu/drm/panthor/panthor_heap.c @@ -4,6 +4,7 @@ #include <linux/iosys-map.h> #include <linux/rwsem.h> +#include <drm/drm_print.h> #include <drm/panthor_drm.h> #include "panthor_device.h" diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c index 4f2858114e5e..87ebb7ae42c4 100644 --- a/drivers/gpu/drm/panthor/panthor_hw.c +++ b/drivers/gpu/drm/panthor/panthor_hw.c @@ -1,13 +1,58 @@ // SPDX-License-Identifier: GPL-2.0 or MIT /* Copyright 2025 ARM Limited. All rights reserved. 
*/ +#include <drm/drm_print.h> + #include "panthor_device.h" +#include "panthor_gpu.h" #include "panthor_hw.h" +#include "panthor_pwr.h" #include "panthor_regs.h" #define GPU_PROD_ID_MAKE(arch_major, prod_major) \ (((arch_major) << 24) | (prod_major)) +/** struct panthor_hw_entry - HW arch major to panthor_hw binding entry */ +struct panthor_hw_entry { + /** @arch_min: Minimum supported architecture major value (inclusive) */ + u8 arch_min; + + /** @arch_max: Maximum supported architecture major value (inclusive) */ + u8 arch_max; + + /** @hwdev: Pointer to panthor_hw structure */ + struct panthor_hw *hwdev; +}; + +static struct panthor_hw panthor_hw_arch_v10 = { + .ops = { + .soft_reset = panthor_gpu_soft_reset, + .l2_power_off = panthor_gpu_l2_power_off, + .l2_power_on = panthor_gpu_l2_power_on, + }, +}; + +static struct panthor_hw panthor_hw_arch_v14 = { + .ops = { + .soft_reset = panthor_pwr_reset_soft, + .l2_power_off = panthor_pwr_l2_power_off, + .l2_power_on = panthor_pwr_l2_power_on, + }, +}; + +static struct panthor_hw_entry panthor_hw_match[] = { + { + .arch_min = 10, + .arch_max = 13, + .hwdev = &panthor_hw_arch_v10, + }, + { + .arch_min = 14, + .arch_max = 14, + .hwdev = &panthor_hw_arch_v14, + }, +}; + static char *get_gpu_model_name(struct panthor_device *ptdev) { const u32 gpu_id = ptdev->gpu_info.gpu_id; @@ -53,6 +98,12 @@ static char *get_gpu_model_name(struct panthor_device *ptdev) fallthrough; case GPU_PROD_ID_MAKE(13, 1): return "Mali-G625"; + case GPU_PROD_ID_MAKE(14, 0): + return "Mali-G1-Ultra"; + case GPU_PROD_ID_MAKE(14, 1): + return "Mali-G1-Premium"; + case GPU_PROD_ID_MAKE(14, 3): + return "Mali-G1-Pro"; } return "(Unknown Mali GPU)"; @@ -62,7 +113,6 @@ static void panthor_gpu_info_init(struct panthor_device *ptdev) { unsigned int i; - ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID); ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID); ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES); @@ -80,12 +130,19 @@ static void panthor_gpu_info_init(struct panthor_device *ptdev) ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT); - ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); - ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); - ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); - /* Introduced in arch 11.x */ ptdev->gpu_info.gpu_features = gpu_read64(ptdev, GPU_FEATURES); + + if (panthor_hw_has_pwr_ctrl(ptdev)) { + /* Introduced in arch 14.x */ + ptdev->gpu_info.l2_present = gpu_read64(ptdev, PWR_L2_PRESENT); + ptdev->gpu_info.tiler_present = gpu_read64(ptdev, PWR_TILER_PRESENT); + ptdev->gpu_info.shader_present = gpu_read64(ptdev, PWR_SHADER_PRESENT); + } else { + ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); + ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); + ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); + } } static void panthor_hw_info_init(struct panthor_device *ptdev) @@ -117,8 +174,50 @@ static void panthor_hw_info_init(struct panthor_device *ptdev) ptdev->gpu_info.tiler_present); } +static int panthor_hw_bind_device(struct panthor_device *ptdev) +{ + struct panthor_hw *hdev = NULL; + const u32 arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id); + int i = 0; + + for (i = 0; i < ARRAY_SIZE(panthor_hw_match); i++) { + struct panthor_hw_entry *entry = &panthor_hw_match[i]; + + if (arch_major >= entry->arch_min && arch_major <= entry->arch_max) { + hdev = 
entry->hwdev; + break; + } + } + + if (!hdev) + return -EOPNOTSUPP; + + ptdev->hw = hdev; + + return 0; +} + +static int panthor_hw_gpu_id_init(struct panthor_device *ptdev) +{ + ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); + if (!ptdev->gpu_info.gpu_id) + return -ENXIO; + + return 0; +} + int panthor_hw_init(struct panthor_device *ptdev) { + int ret = 0; + + ret = panthor_hw_gpu_id_init(ptdev); + if (ret) + return ret; + + ret = panthor_hw_bind_device(ptdev); + if (ret) + return ret; + panthor_hw_info_init(ptdev); return 0; diff --git a/drivers/gpu/drm/panthor/panthor_hw.h b/drivers/gpu/drm/panthor/panthor_hw.h index 0af6acc6aa6a..56c68c1e9c26 100644 --- a/drivers/gpu/drm/panthor/panthor_hw.h +++ b/drivers/gpu/drm/panthor/panthor_hw.h @@ -4,8 +4,53 @@ #ifndef __PANTHOR_HW_H__ #define __PANTHOR_HW_H__ -struct panthor_device; +#include "panthor_device.h" +#include "panthor_regs.h" + +/** + * struct panthor_hw_ops - HW operations that are specific to a GPU + */ +struct panthor_hw_ops { + /** @soft_reset: Soft reset function pointer */ + int (*soft_reset)(struct panthor_device *ptdev); + + /** @l2_power_off: L2 power off function pointer */ + void (*l2_power_off)(struct panthor_device *ptdev); + + /** @l2_power_on: L2 power on function pointer */ + int (*l2_power_on)(struct panthor_device *ptdev); +}; + +/** + * struct panthor_hw - GPU specific register mapping and functions + */ +struct panthor_hw { + /** @features: Bitmap containing panthor_hw_feature */ + + /** @ops: Panthor HW specific operations */ + struct panthor_hw_ops ops; +}; int panthor_hw_init(struct panthor_device *ptdev); +static inline int panthor_hw_soft_reset(struct panthor_device *ptdev) +{ + return ptdev->hw->ops.soft_reset(ptdev); +} + +static inline int panthor_hw_l2_power_on(struct panthor_device *ptdev) +{ + return ptdev->hw->ops.l2_power_on(ptdev); +} + +static inline void panthor_hw_l2_power_off(struct panthor_device *ptdev) +{ + ptdev->hw->ops.l2_power_off(ptdev); +} + +static inline bool panthor_hw_has_pwr_ctrl(struct panthor_device *ptdev) +{ + return GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) >= 14; +} + #endif /* __PANTHOR_HW_H__ */ diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 7870e7dbaa5d..d4839d282689 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -7,6 +7,7 @@ #include <drm/drm_exec.h> #include <drm/drm_gpuvm.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/gpu_scheduler.h> #include <drm/panthor_drm.h> @@ -181,20 +182,6 @@ struct panthor_vm_op_ctx { u64 range; } va; - /** - * @returned_vmas: List of panthor_vma objects returned after a VM operation. - * - * For unmap operations, this will contain all VMAs that were covered by the - * specified VA range. - * - * For map operations, this will contain all VMAs that previously mapped to - * the specified VA range. - * - * Those VMAs, and the resources they point to will be released as part of - * the op_ctx cleanup operation. - */ - struct list_head returned_vmas; - /** @map: Fields specific to a map operation. */ struct { /** @map.vm_bo: Buffer object to map. 
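panthor_hw_bind_device() above turns GPU-specific behaviour into data: the architecture major read from GPU_ID selects a panthor_hw descriptor, and callers go through panthor_hw_soft_reset()/panthor_hw_l2_power_on()/panthor_hw_l2_power_off() instead of open-coded version checks. Supporting a future architecture would then mean adding one ops table and one match entry; a hypothetical sketch (the arch number and ops choices are invented):

static struct panthor_hw panthor_hw_arch_v15 = {
	.ops = {
		/* Assuming such a part keeps the PWR_CONTROL block semantics. */
		.soft_reset = panthor_pwr_reset_soft,
		.l2_power_off = panthor_pwr_l2_power_off,
		.l2_power_on = panthor_pwr_l2_power_on,
	},
};

/* Additional entry in panthor_hw_match[]: */
{ .arch_min = 15, .arch_max = 15, .hwdev = &panthor_hw_arch_v15, },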
*/ @@ -917,10 +904,9 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) { struct panthor_device *ptdev = vm->ptdev; struct io_pgtable_ops *ops = vm->pgtbl_ops; + u64 start_iova = iova; u64 offset = 0; - drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size); - while (offset < size) { size_t unmapped_sz = 0, pgcount; size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount); @@ -935,6 +921,12 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) panthor_vm_flush_range(vm, iova, offset + unmapped_sz); return -EINVAL; } + + drm_dbg(&ptdev->base, + "unmap: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pgcnt=%zu, pgsz=%zu", + vm->as.id, start_iova, size, iova + offset, + unmapped_sz / pgsize, pgsize); + offset += unmapped_sz; } @@ -950,6 +942,7 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, struct scatterlist *sgl; struct io_pgtable_ops *ops = vm->pgtbl_ops; u64 start_iova = iova; + u64 start_size = size; int ret; if (!size) @@ -969,15 +962,18 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, len = min_t(size_t, len, size); size -= len; - drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx", - vm->as.id, iova, &paddr, len); - while (len) { size_t pgcount, mapped = 0; size_t pgsize = get_pgsize(iova | paddr, len, &pgcount); ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_KERNEL, &mapped); + + drm_dbg(&ptdev->base, + "map: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pa=%pad, pgcnt=%zu, pgsz=%zu", + vm->as.id, start_iova, start_size, iova, &paddr, + mapped / pgsize, pgsize); + iova += mapped; paddr += mapped; len -= mapped; @@ -1081,47 +1077,18 @@ void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node) mutex_unlock(&vm->mm_lock); } -static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo) +static void panthor_vm_bo_free(struct drm_gpuvm_bo *vm_bo) { struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj); - struct drm_gpuvm *vm = vm_bo->vm; - bool unpin; - /* We must retain the GEM before calling drm_gpuvm_bo_put(), - * otherwise the mutex might be destroyed while we hold it. - * Same goes for the VM, since we take the VM resv lock. - */ - drm_gem_object_get(&bo->base.base); - drm_gpuvm_get(vm); - - /* We take the resv lock to protect against concurrent accesses to the - * gpuvm evicted/extobj lists that are modified in - * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put() - * releases sthe last vm_bo reference. - * We take the BO GPUVA list lock to protect the vm_bo removal from the - * GEM vm_bo list. - */ - dma_resv_lock(drm_gpuvm_resv(vm), NULL); - mutex_lock(&bo->base.base.gpuva.lock); - unpin = drm_gpuvm_bo_put(vm_bo); - mutex_unlock(&bo->base.base.gpuva.lock); - dma_resv_unlock(drm_gpuvm_resv(vm)); - - /* If the vm_bo object was destroyed, release the pin reference that - * was hold by this object. 
- */ - if (unpin && !drm_gem_is_imported(&bo->base.base)) + if (!drm_gem_is_imported(&bo->base.base)) drm_gem_shmem_unpin(&bo->base); - - drm_gpuvm_put(vm); - drm_gem_object_put(&bo->base.base); + kfree(vm_bo); } static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx, struct panthor_vm *vm) { - struct panthor_vma *vma, *tmp_vma; - u32 remaining_pt_count = op_ctx->rsvd_page_tables.count - op_ctx->rsvd_page_tables.ptr; @@ -1134,16 +1101,26 @@ static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx, kfree(op_ctx->rsvd_page_tables.pages); if (op_ctx->map.vm_bo) - panthor_vm_bo_put(op_ctx->map.vm_bo); + drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo); for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) kfree(op_ctx->preallocated_vmas[i]); - list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) { - list_del(&vma->node); - panthor_vm_bo_put(vma->base.vm_bo); - kfree(vma); + drm_gpuvm_bo_deferred_cleanup(&vm->base); +} + +static void +panthor_vm_op_ctx_return_vma(struct panthor_vm_op_ctx *op_ctx, + struct panthor_vma *vma) +{ + for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) { + if (!op_ctx->preallocated_vmas[i]) { + op_ctx->preallocated_vmas[i] = vma; + return; + } } + + WARN_ON_ONCE(1); } static struct panthor_vma * @@ -1236,7 +1213,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, return -EINVAL; memset(op_ctx, 0, sizeof(*op_ctx)); - INIT_LIST_HEAD(&op_ctx->returned_vmas); op_ctx->flags = flags; op_ctx->va.range = size; op_ctx->va.addr = va; @@ -1247,7 +1223,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, if (!drm_gem_is_imported(&bo->base.base)) { /* Pre-reserve the BO pages, so the map operation doesn't have to - * allocate. + * allocate. This pin is dropped in panthor_vm_bo_free(), so + * once we have successfully called drm_gpuvm_bo_create(), + * GPUVM will take care of dropping the pin for us. */ ret = drm_gem_shmem_pin(&bo->base); if (ret) @@ -1286,16 +1264,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, mutex_unlock(&bo->base.base.gpuva.lock); dma_resv_unlock(panthor_vm_resv(vm)); - /* If the a vm_bo for this <VM,BO> combination exists, it already - * retains a pin ref, and we can release the one we took earlier. - * - * If our pre-allocated vm_bo is picked, it now retains the pin ref, - * which will be released in panthor_vm_bo_put(). - */ - if (preallocated_vm_bo != op_ctx->map.vm_bo && - !drm_gem_is_imported(&bo->base.base)) - drm_gem_shmem_unpin(&bo->base); - op_ctx->map.bo_offset = offset; /* L1, L2 and L3 page tables. 
@@ -1343,7 +1311,6 @@ static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx, int ret; memset(op_ctx, 0, sizeof(*op_ctx)); - INIT_LIST_HEAD(&op_ctx->returned_vmas); op_ctx->va.range = size; op_ctx->va.addr = va; op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP; @@ -1391,7 +1358,6 @@ static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx struct panthor_vm *vm) { memset(op_ctx, 0, sizeof(*op_ctx)); - INIT_LIST_HEAD(&op_ctx->returned_vmas); op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY; } @@ -2037,26 +2003,13 @@ static void panthor_vma_link(struct panthor_vm *vm, mutex_lock(&bo->base.base.gpuva.lock); drm_gpuva_link(&vma->base, vm_bo); - drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo)); mutex_unlock(&bo->base.base.gpuva.lock); } -static void panthor_vma_unlink(struct panthor_vm *vm, - struct panthor_vma *vma) +static void panthor_vma_unlink(struct panthor_vma *vma) { - struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj); - struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo); - - mutex_lock(&bo->base.base.gpuva.lock); - drm_gpuva_unlink(&vma->base); - mutex_unlock(&bo->base.base.gpuva.lock); - - /* drm_gpuva_unlink() release the vm_bo, but we manually retained it - * when entering this function, so we can implement deferred VMA - * destruction. Re-assign it here. - */ - vma->base.vm_bo = vm_bo; - list_add_tail(&vma->node, &vm->op_ctx->returned_vmas); + drm_gpuva_unlink_defer(&vma->base); + kfree(vma); } static void panthor_vma_init(struct panthor_vma *vma, u32 flags) @@ -2085,15 +2038,17 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv) ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags), op_ctx->map.sgt, op->map.gem.offset, op->map.va.range); - if (ret) + if (ret) { + panthor_vm_op_ctx_return_vma(op_ctx, vma); return ret; + } - /* Ref owned by the mapping now, clear the obj field so we don't release the - * pinning/obj ref behind GPUVA's back. - */ drm_gpuva_map(&vm->base, &vma->base, &op->map); panthor_vma_link(vm, vma, op_ctx->map.vm_bo); + + drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo); op_ctx->map.vm_bo = NULL; + return 0; } @@ -2132,16 +2087,14 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op, * owned by the old mapping which will be released when this * mapping is destroyed, we need to grab a ref here. */ - panthor_vma_link(vm, prev_vma, - drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo)); + panthor_vma_link(vm, prev_vma, op->remap.unmap->va->vm_bo); } if (next_vma) { - panthor_vma_link(vm, next_vma, - drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo)); + panthor_vma_link(vm, next_vma, op->remap.unmap->va->vm_bo); } - panthor_vma_unlink(vm, unmap_vma); + panthor_vma_unlink(unmap_vma); return 0; } @@ -2158,12 +2111,13 @@ static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op, return ret; drm_gpuva_unmap(&op->unmap); - panthor_vma_unlink(vm, unmap_vma); + panthor_vma_unlink(unmap_vma); return 0; } static const struct drm_gpuvm_ops panthor_gpuvm_ops = { .vm_free = panthor_vm_free, + .vm_bo_free = panthor_vm_bo_free, .sm_step_map = panthor_gpuva_sm_step_map, .sm_step_remap = panthor_gpuva_sm_step_remap, .sm_step_unmap = panthor_gpuva_sm_step_unmap, diff --git a/drivers/gpu/drm/panthor/panthor_pwr.c b/drivers/gpu/drm/panthor/panthor_pwr.c new file mode 100644 index 000000000000..57cfc7ce715b --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_pwr.c @@ -0,0 +1,549 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* Copyright 2025 ARM Limited. 
All rights reserved. */ + +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/cleanup.h> +#include <linux/iopoll.h> +#include <linux/wait.h> + +#include <drm/drm_managed.h> +#include <drm/drm_print.h> + +#include "panthor_device.h" +#include "panthor_hw.h" +#include "panthor_pwr.h" +#include "panthor_regs.h" + +#define PWR_INTERRUPTS_MASK \ + (PWR_IRQ_POWER_CHANGED_SINGLE | \ + PWR_IRQ_POWER_CHANGED_ALL | \ + PWR_IRQ_DELEGATION_CHANGED | \ + PWR_IRQ_RESET_COMPLETED | \ + PWR_IRQ_RETRACT_COMPLETED | \ + PWR_IRQ_INSPECT_COMPLETED | \ + PWR_IRQ_COMMAND_NOT_ALLOWED | \ + PWR_IRQ_COMMAND_INVALID) + +#define PWR_ALL_CORES_MASK GENMASK_U64(63, 0) + +#define PWR_DOMAIN_MAX_BITS 16 + +#define PWR_TRANSITION_TIMEOUT_US (2ULL * USEC_PER_SEC) + +#define PWR_RETRACT_TIMEOUT_US (2ULL * USEC_PER_MSEC) + +#define PWR_RESET_TIMEOUT_MS 500 + +/** + * struct panthor_pwr - PWR_CONTROL block management data. + */ +struct panthor_pwr { + /** @irq: PWR irq. */ + struct panthor_irq irq; + + /** @reqs_lock: Lock protecting access to pending_reqs. */ + spinlock_t reqs_lock; + + /** @pending_reqs: Pending PWR requests. */ + u32 pending_reqs; + + /** @reqs_acked: PWR request wait queue. */ + wait_queue_head_t reqs_acked; +}; + +static void panthor_pwr_irq_handler(struct panthor_device *ptdev, u32 status) +{ + spin_lock(&ptdev->pwr->reqs_lock); + gpu_write(ptdev, PWR_INT_CLEAR, status); + + if (unlikely(status & PWR_IRQ_COMMAND_NOT_ALLOWED)) + drm_err(&ptdev->base, "PWR_IRQ: COMMAND_NOT_ALLOWED"); + + if (unlikely(status & PWR_IRQ_COMMAND_INVALID)) + drm_err(&ptdev->base, "PWR_IRQ: COMMAND_INVALID"); + + if (status & ptdev->pwr->pending_reqs) { + ptdev->pwr->pending_reqs &= ~status; + wake_up_all(&ptdev->pwr->reqs_acked); + } + spin_unlock(&ptdev->pwr->reqs_lock); +} +PANTHOR_IRQ_HANDLER(pwr, PWR, panthor_pwr_irq_handler); + +static void panthor_pwr_write_command(struct panthor_device *ptdev, u32 command, u64 args) +{ + if (args) + gpu_write64(ptdev, PWR_CMDARG, args); + + gpu_write(ptdev, PWR_COMMAND, command); +} + +static bool reset_irq_raised(struct panthor_device *ptdev) +{ + return gpu_read(ptdev, PWR_INT_RAWSTAT) & PWR_IRQ_RESET_COMPLETED; +} + +static bool reset_pending(struct panthor_device *ptdev) +{ + return (ptdev->pwr->pending_reqs & PWR_IRQ_RESET_COMPLETED); +} + +static int panthor_pwr_reset(struct panthor_device *ptdev, u32 reset_cmd) +{ + scoped_guard(spinlock_irqsave, &ptdev->pwr->reqs_lock) { + if (reset_pending(ptdev)) { + drm_WARN(&ptdev->base, 1, "Reset already pending"); + } else { + ptdev->pwr->pending_reqs |= PWR_IRQ_RESET_COMPLETED; + gpu_write(ptdev, PWR_INT_CLEAR, PWR_IRQ_RESET_COMPLETED); + panthor_pwr_write_command(ptdev, reset_cmd, 0); + } + } + + if (!wait_event_timeout(ptdev->pwr->reqs_acked, !reset_pending(ptdev), + msecs_to_jiffies(PWR_RESET_TIMEOUT_MS))) { + guard(spinlock_irqsave)(&ptdev->pwr->reqs_lock); + + if (reset_pending(ptdev) && !reset_irq_raised(ptdev)) { + drm_err(&ptdev->base, "RESET timed out (0x%x)", reset_cmd); + return -ETIMEDOUT; + } + + ptdev->pwr->pending_reqs &= ~PWR_IRQ_RESET_COMPLETED; + } + + return 0; +} + +static const char *get_domain_name(u8 domain) +{ + switch (domain) { + case PWR_COMMAND_DOMAIN_L2: + return "L2"; + case PWR_COMMAND_DOMAIN_TILER: + return "Tiler"; + case PWR_COMMAND_DOMAIN_SHADER: + return "Shader"; + case PWR_COMMAND_DOMAIN_BASE: + return "Base"; + case PWR_COMMAND_DOMAIN_STACK: + return "Stack"; + } + return "Unknown"; +} + +static u32 get_domain_base(u8 domain) +{ + switch (domain) { + case 
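panthor_pwr_reset() above relies on the <linux/cleanup.h> lock guards: scoped_guard(spinlock_irqsave, ...) takes the lock with interrupts saved for the duration of the following block and releases it automatically when the block is left, removing the open-coded unlock on every return path. A minimal stand-alone sketch of the idiom (example_ names are hypothetical):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static u32 example_pending_reqs;

static void example_mark_pending(u32 bits)
{
	/* Behaves like spin_lock_irqsave()/spin_unlock_irqrestore(), but the
	 * unlock happens implicitly when the guarded scope ends. */
	scoped_guard(spinlock_irqsave, &example_lock)
		example_pending_reqs |= bits;
}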
PWR_COMMAND_DOMAIN_L2: + return PWR_L2_PRESENT; + case PWR_COMMAND_DOMAIN_TILER: + return PWR_TILER_PRESENT; + case PWR_COMMAND_DOMAIN_SHADER: + return PWR_SHADER_PRESENT; + case PWR_COMMAND_DOMAIN_BASE: + return PWR_BASE_PRESENT; + case PWR_COMMAND_DOMAIN_STACK: + return PWR_STACK_PRESENT; + } + return 0; +} + +static u32 get_domain_ready_reg(u32 domain) +{ + return get_domain_base(domain) + (PWR_L2_READY - PWR_L2_PRESENT); +} + +static u32 get_domain_pwrtrans_reg(u32 domain) +{ + return get_domain_base(domain) + (PWR_L2_PWRTRANS - PWR_L2_PRESENT); +} + +static bool is_valid_domain(u32 domain) +{ + return get_domain_base(domain) != 0; +} + +static bool has_rtu(struct panthor_device *ptdev) +{ + return ptdev->gpu_info.gpu_features & GPU_FEATURES_RAY_TRAVERSAL; +} + +static u8 get_domain_subdomain(struct panthor_device *ptdev, u32 domain) +{ + if (domain == PWR_COMMAND_DOMAIN_SHADER && has_rtu(ptdev)) + return PWR_COMMAND_SUBDOMAIN_RTU; + + return 0; +} + +static int panthor_pwr_domain_wait_transition(struct panthor_device *ptdev, u32 domain, + u32 timeout_us) +{ + u32 pwrtrans_reg = get_domain_pwrtrans_reg(domain); + u64 val; + int ret = 0; + + ret = gpu_read64_poll_timeout(ptdev, pwrtrans_reg, val, !(PWR_ALL_CORES_MASK & val), 100, + timeout_us); + if (ret) { + drm_err(&ptdev->base, "%s domain power in transition, pwrtrans(0x%llx)", + get_domain_name(domain), val); + return ret; + } + + return 0; +} + +static void panthor_pwr_debug_info_show(struct panthor_device *ptdev) +{ + drm_info(&ptdev->base, "GPU_FEATURES: 0x%016llx", gpu_read64(ptdev, GPU_FEATURES)); + drm_info(&ptdev->base, "PWR_STATUS: 0x%016llx", gpu_read64(ptdev, PWR_STATUS)); + drm_info(&ptdev->base, "L2_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_L2_PRESENT)); + drm_info(&ptdev->base, "L2_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_L2_PWRTRANS)); + drm_info(&ptdev->base, "L2_READY: 0x%016llx", gpu_read64(ptdev, PWR_L2_READY)); + drm_info(&ptdev->base, "TILER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PRESENT)); + drm_info(&ptdev->base, "TILER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PWRTRANS)); + drm_info(&ptdev->base, "TILER_READY: 0x%016llx", gpu_read64(ptdev, PWR_TILER_READY)); + drm_info(&ptdev->base, "SHADER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PRESENT)); + drm_info(&ptdev->base, "SHADER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PWRTRANS)); + drm_info(&ptdev->base, "SHADER_READY: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_READY)); +} + +static int panthor_pwr_domain_transition(struct panthor_device *ptdev, u32 cmd, u32 domain, + u64 mask, u32 timeout_us) +{ + u32 ready_reg = get_domain_ready_reg(domain); + u32 pwr_cmd = PWR_COMMAND_DEF(cmd, domain, get_domain_subdomain(ptdev, domain)); + u64 expected_val = 0; + u64 val; + int ret = 0; + + if (drm_WARN_ON(&ptdev->base, !is_valid_domain(domain))) + return -EINVAL; + + switch (cmd) { + case PWR_COMMAND_POWER_DOWN: + expected_val = 0; + break; + case PWR_COMMAND_POWER_UP: + expected_val = mask; + break; + default: + drm_err(&ptdev->base, "Invalid power domain transition command (0x%x)", cmd); + return -EINVAL; + } + + ret = panthor_pwr_domain_wait_transition(ptdev, domain, timeout_us); + if (ret) + return ret; + + /* domain already in target state, return early */ + if ((gpu_read64(ptdev, ready_reg) & mask) == expected_val) + return 0; + + panthor_pwr_write_command(ptdev, pwr_cmd, mask); + + ret = gpu_read64_poll_timeout(ptdev, ready_reg, val, (mask & val) == expected_val, 100, + timeout_us); + if (ret) { + drm_err(&ptdev->base, + "timeout 
waiting on %s power domain transition, cmd(0x%x), arg(0x%llx)", + get_domain_name(domain), pwr_cmd, mask); + panthor_pwr_debug_info_show(ptdev); + return ret; + } + + return 0; +} + +#define panthor_pwr_domain_power_off(__ptdev, __domain, __mask, __timeout_us) \ + panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_DOWN, __domain, __mask, \ + __timeout_us) + +#define panthor_pwr_domain_power_on(__ptdev, __domain, __mask, __timeout_us) \ + panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_UP, __domain, __mask, __timeout_us) + +/** + * retract_domain() - Retract control of a domain from MCU + * @ptdev: Device. + * @domain: Domain to retract the control + * + * Retracting L2 domain is not expected since it won't be delegated. + * + * Return: 0 on success or retracted already. + * -EPERM if domain is L2. + * A negative error code otherwise. + */ +static int retract_domain(struct panthor_device *ptdev, u32 domain) +{ + const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_RETRACT, domain, 0); + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); + const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain); + const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain); + u64 val; + int ret; + + if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2)) + return -EPERM; + + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, !(PWR_STATUS_RETRACT_PENDING & val), + 0, PWR_RETRACT_TIMEOUT_US); + if (ret) { + drm_err(&ptdev->base, "%s domain retract pending", get_domain_name(domain)); + return ret; + } + + if (!(pwr_status & delegated_mask)) { + drm_dbg(&ptdev->base, "%s domain already retracted", get_domain_name(domain)); + return 0; + } + + panthor_pwr_write_command(ptdev, pwr_cmd, 0); + + /* + * On successful retraction + * allow-flag will be set with delegated-flag being cleared. + */ + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, + ((delegated_mask | allow_mask) & val) == allow_mask, 10, + PWR_TRANSITION_TIMEOUT_US); + if (ret) { + drm_err(&ptdev->base, "Retracting %s domain timeout, cmd(0x%x)", + get_domain_name(domain), pwr_cmd); + return ret; + } + + return 0; +} + +/** + * delegate_domain() - Delegate control of a domain to MCU + * @ptdev: Device. + * @domain: Domain to delegate the control + * + * Delegating L2 domain is prohibited. + * + * Return: + * * 0 on success or delegated already. + * * -EPERM if domain is L2. + * * A negative error code otherwise. + */ +static int delegate_domain(struct panthor_device *ptdev, u32 domain) +{ + const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_DELEGATE, domain, 0); + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); + const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain); + const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain); + u64 val; + int ret; + + if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2)) + return -EPERM; + + /* Already delegated, exit early */ + if (pwr_status & delegated_mask) + return 0; + + /* Check if the command is allowed before delegating. */ + if (!(pwr_status & allow_mask)) { + drm_warn(&ptdev->base, "Delegating %s domain not allowed", get_domain_name(domain)); + return -EPERM; + } + + ret = panthor_pwr_domain_wait_transition(ptdev, domain, PWR_TRANSITION_TIMEOUT_US); + if (ret) + return ret; + + panthor_pwr_write_command(ptdev, pwr_cmd, 0); + + /* + * On successful delegation + * allow-flag will be cleared with delegated-flag being set. 
+ */ + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, + ((delegated_mask | allow_mask) & val) == delegated_mask, + 10, PWR_TRANSITION_TIMEOUT_US); + if (ret) { + drm_err(&ptdev->base, "Delegating %s domain timeout, cmd(0x%x)", + get_domain_name(domain), pwr_cmd); + return ret; + } + + return 0; +} + +static int panthor_pwr_delegate_domains(struct panthor_device *ptdev) +{ + int ret; + + if (!ptdev->pwr) + return 0; + + ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER); + if (ret) + return ret; + + ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_TILER); + if (ret) + goto err_retract_shader; + + return 0; + +err_retract_shader: + retract_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER); + + return ret; +} + +/** + * panthor_pwr_domain_force_off - Forcefully power down a domain. + * @ptdev: Device. + * @domain: Domain to forcefully power down. + * + * This function will attempt to retract and power off the requested power + * domain. However, if retraction fails, the operation is aborted. If power off + * fails, the domain will remain retracted and under the host control. + * + * Return: 0 on success or a negative error code on failure. + */ +static int panthor_pwr_domain_force_off(struct panthor_device *ptdev, u32 domain) +{ + const u64 domain_ready = gpu_read64(ptdev, get_domain_ready_reg(domain)); + int ret; + + /* Domain already powered down, early exit. */ + if (!domain_ready) + return 0; + + /* Domain has to be in host control to issue power off command. */ + ret = retract_domain(ptdev, domain); + if (ret) + return ret; + + return panthor_pwr_domain_power_off(ptdev, domain, domain_ready, PWR_TRANSITION_TIMEOUT_US); +} + +void panthor_pwr_unplug(struct panthor_device *ptdev) +{ + unsigned long flags; + + if (!ptdev->pwr) + return; + + /* Make sure the IRQ handler is not running after that point. */ + panthor_pwr_irq_suspend(&ptdev->pwr->irq); + + /* Wake-up all waiters. */ + spin_lock_irqsave(&ptdev->pwr->reqs_lock, flags); + ptdev->pwr->pending_reqs = 0; + wake_up_all(&ptdev->pwr->reqs_acked); + spin_unlock_irqrestore(&ptdev->pwr->reqs_lock, flags); +} + +int panthor_pwr_init(struct panthor_device *ptdev) +{ + struct panthor_pwr *pwr; + int err, irq; + + if (!panthor_hw_has_pwr_ctrl(ptdev)) + return 0; + + pwr = drmm_kzalloc(&ptdev->base, sizeof(*pwr), GFP_KERNEL); + if (!pwr) + return -ENOMEM; + + spin_lock_init(&pwr->reqs_lock); + init_waitqueue_head(&pwr->reqs_acked); + ptdev->pwr = pwr; + + irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu"); + if (irq < 0) + return irq; + + err = panthor_request_pwr_irq(ptdev, &pwr->irq, irq, PWR_INTERRUPTS_MASK); + if (err) + return err; + + return 0; +} + +int panthor_pwr_reset_soft(struct panthor_device *ptdev) +{ + if (!(gpu_read64(ptdev, PWR_STATUS) & PWR_STATUS_ALLOW_SOFT_RESET)) { + drm_err(&ptdev->base, "RESET_SOFT not allowed"); + return -EOPNOTSUPP; + } + + return panthor_pwr_reset(ptdev, PWR_COMMAND_RESET_SOFT); +} + +void panthor_pwr_l2_power_off(struct panthor_device *ptdev) +{ + const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2); + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); + + /* Abort if L2 power off constraints are not satisfied */ + if (!(pwr_status & l2_allow_mask)) { + drm_warn(&ptdev->base, "Power off L2 domain not allowed"); + return; + } + + /* It is expected that when halting the MCU, it would power down its + * delegated domains. 
However, an unresponsive or hung MCU may not do + * so, which is why we need to check and retract the domains back into + * host control to be powered down in the right order before powering + * down the L2. + */ + if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_TILER)) + return; + + if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_SHADER)) + return; + + panthor_pwr_domain_power_off(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present, + PWR_TRANSITION_TIMEOUT_US); +} + +int panthor_pwr_l2_power_on(struct panthor_device *ptdev) +{ + const u32 pwr_status = gpu_read64(ptdev, PWR_STATUS); + const u32 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2); + int ret; + + if ((pwr_status & l2_allow_mask) == 0) { + drm_warn(&ptdev->base, "Power on L2 domain not allowed"); + return -EPERM; + } + + ret = panthor_pwr_domain_power_on(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present, + PWR_TRANSITION_TIMEOUT_US); + if (ret) + return ret; + + /* Delegate control of the shader and tiler power domains to the MCU as + * it can better manage which shader/tiler cores need to be powered up + * or can be powered down based on currently running jobs. + * + * If the shader and tiler domains are already delegated to the MCU, + * this call would just return early. + */ + return panthor_pwr_delegate_domains(ptdev); +} + +void panthor_pwr_suspend(struct panthor_device *ptdev) +{ + if (!ptdev->pwr) + return; + + panthor_pwr_irq_suspend(&ptdev->pwr->irq); +} + +void panthor_pwr_resume(struct panthor_device *ptdev) +{ + if (!ptdev->pwr) + return; + + panthor_pwr_irq_resume(&ptdev->pwr->irq, PWR_INTERRUPTS_MASK); +} diff --git a/drivers/gpu/drm/panthor/panthor_pwr.h b/drivers/gpu/drm/panthor/panthor_pwr.h new file mode 100644 index 000000000000..adf1f6136abc --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_pwr.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ +/* Copyright 2025 ARM Limited. All rights reserved. 
*/ + +#ifndef __PANTHOR_PWR_H__ +#define __PANTHOR_PWR_H__ + +struct panthor_device; + +void panthor_pwr_unplug(struct panthor_device *ptdev); + +int panthor_pwr_init(struct panthor_device *ptdev); + +int panthor_pwr_reset_soft(struct panthor_device *ptdev); + +void panthor_pwr_l2_power_off(struct panthor_device *ptdev); + +int panthor_pwr_l2_power_on(struct panthor_device *ptdev); + +void panthor_pwr_suspend(struct panthor_device *ptdev); + +void panthor_pwr_resume(struct panthor_device *ptdev); + +#endif /* __PANTHOR_PWR_H__ */ diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h index 8bee76d01bf8..08bf06c452d6 100644 --- a/drivers/gpu/drm/panthor/panthor_regs.h +++ b/drivers/gpu/drm/panthor/panthor_regs.h @@ -64,6 +64,8 @@ #define GPU_FAULT_STATUS 0x3C #define GPU_FAULT_ADDR 0x40 +#define GPU_L2_CONFIG 0x48 +#define GPU_L2_CONFIG_ASN_HASH_ENABLE BIT(24) #define GPU_PWR_KEY 0x50 #define GPU_PWR_KEY_UNLOCK 0x2968A819 @@ -72,6 +74,7 @@ #define GPU_FEATURES 0x60 #define GPU_FEATURES_RAY_INTERSECTION BIT(2) +#define GPU_FEATURES_RAY_TRAVERSAL BIT(5) #define GPU_TIMESTAMP_OFFSET 0x88 #define GPU_CYCLE_COUNT 0x90 @@ -110,6 +113,8 @@ #define GPU_REVID 0x280 +#define GPU_ASN_HASH(n) (0x2C0 + ((n) * 4)) + #define GPU_COHERENCY_FEATURES 0x300 #define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name) @@ -205,4 +210,82 @@ #define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000)) #define CSF_GLB_DOORBELL_ID 0 +/* PWR Control registers */ + +#define PWR_CONTROL_BASE 0x800 +#define PWR_CTRL_REG(x) (PWR_CONTROL_BASE + (x)) + +#define PWR_INT_RAWSTAT PWR_CTRL_REG(0x0) +#define PWR_INT_CLEAR PWR_CTRL_REG(0x4) +#define PWR_INT_MASK PWR_CTRL_REG(0x8) +#define PWR_INT_STAT PWR_CTRL_REG(0xc) +#define PWR_IRQ_POWER_CHANGED_SINGLE BIT(0) +#define PWR_IRQ_POWER_CHANGED_ALL BIT(1) +#define PWR_IRQ_DELEGATION_CHANGED BIT(2) +#define PWR_IRQ_RESET_COMPLETED BIT(3) +#define PWR_IRQ_RETRACT_COMPLETED BIT(4) +#define PWR_IRQ_INSPECT_COMPLETED BIT(5) +#define PWR_IRQ_COMMAND_NOT_ALLOWED BIT(30) +#define PWR_IRQ_COMMAND_INVALID BIT(31) + +#define PWR_STATUS PWR_CTRL_REG(0x20) +#define PWR_STATUS_ALLOW_L2 BIT_U64(0) +#define PWR_STATUS_ALLOW_TILER BIT_U64(1) +#define PWR_STATUS_ALLOW_SHADER BIT_U64(8) +#define PWR_STATUS_ALLOW_BASE BIT_U64(14) +#define PWR_STATUS_ALLOW_STACK BIT_U64(15) +#define PWR_STATUS_DOMAIN_ALLOWED(x) BIT_U64(x) +#define PWR_STATUS_DELEGATED_L2 BIT_U64(16) +#define PWR_STATUS_DELEGATED_TILER BIT_U64(17) +#define PWR_STATUS_DELEGATED_SHADER BIT_U64(24) +#define PWR_STATUS_DELEGATED_BASE BIT_U64(30) +#define PWR_STATUS_DELEGATED_STACK BIT_U64(31) +#define PWR_STATUS_DELEGATED_SHIFT 16 +#define PWR_STATUS_DOMAIN_DELEGATED(x) BIT_U64((x) + PWR_STATUS_DELEGATED_SHIFT) +#define PWR_STATUS_ALLOW_SOFT_RESET BIT_U64(33) +#define PWR_STATUS_ALLOW_FAST_RESET BIT_U64(34) +#define PWR_STATUS_POWER_PENDING BIT_U64(41) +#define PWR_STATUS_RESET_PENDING BIT_U64(42) +#define PWR_STATUS_RETRACT_PENDING BIT_U64(43) +#define PWR_STATUS_INSPECT_PENDING BIT_U64(44) + +#define PWR_COMMAND PWR_CTRL_REG(0x28) +#define PWR_COMMAND_POWER_UP 0x10 +#define PWR_COMMAND_POWER_DOWN 0x11 +#define PWR_COMMAND_DELEGATE 0x20 +#define PWR_COMMAND_RETRACT 0x21 +#define PWR_COMMAND_RESET_SOFT 0x31 +#define PWR_COMMAND_RESET_FAST 0x32 +#define PWR_COMMAND_INSPECT 0xF0 +#define PWR_COMMAND_DOMAIN_L2 0 +#define PWR_COMMAND_DOMAIN_TILER 1 +#define PWR_COMMAND_DOMAIN_SHADER 8 +#define PWR_COMMAND_DOMAIN_BASE 14 +#define PWR_COMMAND_DOMAIN_STACK 15 +#define PWR_COMMAND_SUBDOMAIN_RTU BIT(0) +#define 
PWR_COMMAND_DEF(cmd, domain, subdomain) \ + (((subdomain) << 16) | ((domain) << 8) | (cmd)) + +#define PWR_CMDARG PWR_CTRL_REG(0x30) + +#define PWR_L2_PRESENT PWR_CTRL_REG(0x100) +#define PWR_L2_READY PWR_CTRL_REG(0x108) +#define PWR_L2_PWRTRANS PWR_CTRL_REG(0x110) +#define PWR_L2_PWRACTIVE PWR_CTRL_REG(0x118) +#define PWR_TILER_PRESENT PWR_CTRL_REG(0x140) +#define PWR_TILER_READY PWR_CTRL_REG(0x148) +#define PWR_TILER_PWRTRANS PWR_CTRL_REG(0x150) +#define PWR_TILER_PWRACTIVE PWR_CTRL_REG(0x158) +#define PWR_SHADER_PRESENT PWR_CTRL_REG(0x200) +#define PWR_SHADER_READY PWR_CTRL_REG(0x208) +#define PWR_SHADER_PWRTRANS PWR_CTRL_REG(0x210) +#define PWR_SHADER_PWRACTIVE PWR_CTRL_REG(0x218) +#define PWR_BASE_PRESENT PWR_CTRL_REG(0x380) +#define PWR_BASE_READY PWR_CTRL_REG(0x388) +#define PWR_BASE_PWRTRANS PWR_CTRL_REG(0x390) +#define PWR_BASE_PWRACTIVE PWR_CTRL_REG(0x398) +#define PWR_STACK_PRESENT PWR_CTRL_REG(0x3c0) +#define PWR_STACK_READY PWR_CTRL_REG(0x3c8) +#define PWR_STACK_PWRTRANS PWR_CTRL_REG(0x3d0) + #endif diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 3d1f57e3990f..b834123a6560 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -5,6 +5,7 @@ #include <drm/drm_exec.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/gpu_scheduler.h> #include <drm/panthor_drm.h> @@ -360,17 +361,23 @@ struct panthor_queue { /** @entity: DRM scheduling entity used for this queue. */ struct drm_sched_entity entity; - /** - * @remaining_time: Time remaining before the job timeout expires. - * - * The job timeout is suspended when the queue is not scheduled by the - * FW. Every time we suspend the timer, we need to save the remaining - * time so we can restore it later on. - */ - unsigned long remaining_time; + /** @name: DRM scheduler name for this queue. */ + char *name; - /** @timeout_suspended: True if the job timeout was suspended. */ - bool timeout_suspended; + /** @timeout: Queue timeout related fields. */ + struct { + /** @timeout.work: Work executed when a queue timeout occurs. */ + struct delayed_work work; + + /** + * @timeout.remaining: Time remaining before a queue timeout. + * + * When the timer is running, this value is set to MAX_SCHEDULE_TIMEOUT. + * When the timer is suspended, it's set to the time remaining when the + * timer was suspended. + */ + unsigned long remaining; + } timeout; /** * @doorbell_id: Doorbell assigned to this queue. @@ -895,11 +902,18 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue * if (IS_ERR_OR_NULL(queue)) return; - drm_sched_entity_destroy(&queue->entity); + /* This should have been disabled before that point. 
*/ + drm_WARN_ON(&group->ptdev->base, + disable_delayed_work_sync(&queue->timeout.work)); + + if (queue->entity.fence_context) + drm_sched_entity_destroy(&queue->entity); if (queue->scheduler.ops) drm_sched_fini(&queue->scheduler); + kfree(queue->name); + panthor_queue_put_syncwait_obj(queue); panthor_kernel_bo_destroy(queue->ringbuf); @@ -1039,6 +1053,115 @@ group_unbind_locked(struct panthor_group *group) return 0; } +static bool +group_is_idle(struct panthor_group *group) +{ + struct panthor_device *ptdev = group->ptdev; + u32 inactive_queues; + + if (group->csg_id >= 0) + return ptdev->scheduler->csg_slots[group->csg_id].idle; + + inactive_queues = group->idle_queues | group->blocked_queues; + return hweight32(inactive_queues) == group->queue_count; +} + +static void +queue_reset_timeout_locked(struct panthor_queue *queue) +{ + lockdep_assert_held(&queue->fence_ctx.lock); + + if (queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT) { + mod_delayed_work(queue->scheduler.timeout_wq, + &queue->timeout.work, + msecs_to_jiffies(JOB_TIMEOUT_MS)); + } +} + +static bool +group_can_run(struct panthor_group *group) +{ + return group->state != PANTHOR_CS_GROUP_TERMINATED && + group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && + !group->destroyed && group->fatal_queues == 0 && + !group->timedout; +} + +static bool +queue_timeout_is_suspended(struct panthor_queue *queue) +{ + /* When running, the remaining time is set to MAX_SCHEDULE_TIMEOUT. */ + return queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT; +} + +static void +queue_suspend_timeout_locked(struct panthor_queue *queue) +{ + unsigned long qtimeout, now; + struct panthor_group *group; + struct panthor_job *job; + bool timer_was_active; + + lockdep_assert_held(&queue->fence_ctx.lock); + + /* Already suspended, nothing to do. */ + if (queue_timeout_is_suspended(queue)) + return; + + job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs, + struct panthor_job, node); + group = job ? job->group : NULL; + + /* If the queue is blocked and the group is idle, we want the timer to + * keep running because the group can't be unblocked by other queues, + * so it has to come from an external source, and we want to timebox + * this external signalling. + */ + if (group && group_can_run(group) && + (group->blocked_queues & BIT(job->queue_idx)) && + group_is_idle(group)) + return; + + now = jiffies; + qtimeout = queue->timeout.work.timer.expires; + + /* Cancel the timer. */ + timer_was_active = cancel_delayed_work(&queue->timeout.work); + if (!timer_was_active || !job) + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); + else if (time_after(qtimeout, now)) + queue->timeout.remaining = qtimeout - now; + else + queue->timeout.remaining = 0; + + if (WARN_ON_ONCE(queue->timeout.remaining > msecs_to_jiffies(JOB_TIMEOUT_MS))) + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); +} + +static void +queue_suspend_timeout(struct panthor_queue *queue) +{ + spin_lock(&queue->fence_ctx.lock); + queue_suspend_timeout_locked(queue); + spin_unlock(&queue->fence_ctx.lock); +} + +static void +queue_resume_timeout(struct panthor_queue *queue) +{ + spin_lock(&queue->fence_ctx.lock); + + if (queue_timeout_is_suspended(queue)) { + mod_delayed_work(queue->scheduler.timeout_wq, + &queue->timeout.work, + queue->timeout.remaining); + + queue->timeout.remaining = MAX_SCHEDULE_TIMEOUT; + } + + spin_unlock(&queue->fence_ctx.lock); +} + /** * cs_slot_prog_locked() - Program a queue slot * @ptdev: Device. 
@@ -1077,10 +1200,8 @@ cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) CS_IDLE_EMPTY | CS_STATE_MASK | CS_EXTRACT_EVENT); - if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) { - drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time); - queue->timeout_suspended = false; - } + if (queue->iface.input->insert != queue->iface.input->extract) + queue_resume_timeout(queue); } /** @@ -1107,14 +1228,7 @@ cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) CS_STATE_STOP, CS_STATE_MASK); - /* If the queue is blocked, we want to keep the timeout running, so - * we can detect unbounded waits and kill the group when that happens. - */ - if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) { - queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); - queue->timeout_suspended = true; - WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS)); - } + queue_suspend_timeout(queue); return 0; } @@ -1133,11 +1247,13 @@ csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id) { struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; struct panthor_fw_csg_iface *csg_iface; + u64 endpoint_req; lockdep_assert_held(&ptdev->scheduler->lock); csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); - csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28; + endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface); + csg_slot->priority = CSG_EP_REQ_PRIORITY_GET(endpoint_req); } /** @@ -1297,6 +1413,7 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) struct panthor_csg_slot *csg_slot; struct panthor_group *group; u32 queue_mask = 0, i; + u64 endpoint_req; lockdep_assert_held(&ptdev->scheduler->lock); @@ -1323,10 +1440,12 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) csg_iface->input->allow_compute = group->compute_core_mask; csg_iface->input->allow_fragment = group->fragment_core_mask; csg_iface->input->allow_other = group->tiler_core_mask; - csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | - CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | - CSG_EP_REQ_TILER(group->max_tiler_cores) | - CSG_EP_REQ_PRIORITY(priority); + endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | + CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | + CSG_EP_REQ_TILER(group->max_tiler_cores) | + CSG_EP_REQ_PRIORITY(priority); + panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req); + csg_iface->input->config = panthor_vm_as(group->vm); if (group->suspend_buf) @@ -1411,7 +1530,7 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev, fault = cs_iface->output->fault; info = cs_iface->output->fault_info; - if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) { + if (queue) { u64 cs_extract = queue->iface.output->extract; struct panthor_job *job; @@ -1909,28 +2028,6 @@ tick_ctx_is_full(const struct panthor_scheduler *sched, return ctx->group_count == sched->csg_slot_count; } -static bool -group_is_idle(struct panthor_group *group) -{ - struct panthor_device *ptdev = group->ptdev; - u32 inactive_queues; - - if (group->csg_id >= 0) - return ptdev->scheduler->csg_slots[group->csg_id].idle; - - inactive_queues = group->idle_queues | group->blocked_queues; - return hweight32(inactive_queues) == group->queue_count; -} - -static bool -group_can_run(struct panthor_group *group) -{ - return group->state 
!= PANTHOR_CS_GROUP_TERMINATED && - group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && - !group->destroyed && group->fatal_queues == 0 && - !group->timedout; -} - static void tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx, @@ -2224,9 +2321,9 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c continue; } - panthor_fw_update_reqs(csg_iface, endpoint_req, - CSG_EP_REQ_PRIORITY(new_csg_prio), - CSG_EP_REQ_PRIORITY_MASK); + panthor_fw_csg_endpoint_req_update(ptdev, csg_iface, + CSG_EP_REQ_PRIORITY(new_csg_prio), + CSG_EP_REQ_PRIORITY_MASK); csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, CSG_ENDPOINT_CONFIG); @@ -2612,6 +2709,7 @@ static void group_schedule_locked(struct panthor_group *group, u32 queue_mask) static void queue_stop(struct panthor_queue *queue, struct panthor_job *bad_job) { + disable_delayed_work_sync(&queue->timeout.work); drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); } @@ -2623,6 +2721,7 @@ static void queue_start(struct panthor_queue *queue) list_for_each_entry(job, &queue->scheduler.pending_list, base.list) job->base.s_fence->parent = dma_fence_get(job->done_fence); + enable_delayed_work(&queue->timeout.work); drm_sched_start(&queue->scheduler, 0); } @@ -2689,7 +2788,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev) { struct panthor_scheduler *sched = ptdev->scheduler; struct panthor_csg_slots_upd_ctx upd_ctx; - struct panthor_group *group; u32 suspended_slots; u32 i; @@ -2743,13 +2841,23 @@ void panthor_sched_suspend(struct panthor_device *ptdev) while (slot_mask) { u32 csg_id = ffs(slot_mask) - 1; struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; + struct panthor_group *group = csg_slot->group; /* Terminate command timedout, but the soft-reset will * automatically terminate all active groups, so let's * force the state to halted here. */ - if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED) - csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; + if (group->state != PANTHOR_CS_GROUP_TERMINATED) { + group->state = PANTHOR_CS_GROUP_TERMINATED; + + /* Reset the queue slots manually if the termination + * request failed. 
+ */ + for (i = 0; i < group->queue_count; i++) { + if (group->queues[i]) + cs_slot_reset_locked(ptdev, csg_id, i); + } + } slot_mask &= ~BIT(csg_id); } } @@ -2779,8 +2887,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev) for (i = 0; i < sched->csg_slot_count; i++) { struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; + struct panthor_group *group = csg_slot->group; - group = csg_slot->group; if (!group) continue; @@ -2909,35 +3017,47 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) xa_unlock(&gpool->xa); } -static void group_sync_upd_work(struct work_struct *work) +static bool queue_check_job_completion(struct panthor_queue *queue) { - struct panthor_group *group = - container_of(work, struct panthor_group, sync_upd_work); + struct panthor_syncobj_64b *syncobj = NULL; struct panthor_job *job, *job_tmp; + bool cookie, progress = false; LIST_HEAD(done_jobs); - u32 queue_idx; - bool cookie; cookie = dma_fence_begin_signalling(); - for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { - struct panthor_queue *queue = group->queues[queue_idx]; - struct panthor_syncobj_64b *syncobj; + spin_lock(&queue->fence_ctx.lock); + list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { + if (!syncobj) { + struct panthor_group *group = job->group; - if (!queue) - continue; + syncobj = group->syncobjs->kmap + + (job->queue_idx * sizeof(*syncobj)); + } - syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj)); + if (syncobj->seqno < job->done_fence->seqno) + break; - spin_lock(&queue->fence_ctx.lock); - list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { - if (syncobj->seqno < job->done_fence->seqno) - break; + list_move_tail(&job->node, &done_jobs); + dma_fence_signal_locked(job->done_fence); + } - list_move_tail(&job->node, &done_jobs); - dma_fence_signal_locked(job->done_fence); - } - spin_unlock(&queue->fence_ctx.lock); + if (list_empty(&queue->fence_ctx.in_flight_jobs)) { + /* If we have no job left, we cancel the timer, and reset remaining + * time to its default so it can be restarted next time + * queue_resume_timeout() is called. + */ + queue_suspend_timeout_locked(queue); + + /* If there's no job pending, we consider it progress to avoid a + * spurious timeout if the timeout handler and the sync update + * handler raced. + */ + progress = true; + } else if (!list_empty(&done_jobs)) { + queue_reset_timeout_locked(queue); + progress = true; } + spin_unlock(&queue->fence_ctx.lock); dma_fence_end_signalling(cookie); list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { @@ -2947,6 +3067,27 @@ static void group_sync_upd_work(struct work_struct *work) panthor_job_put(&job->base); } + return progress; +} + +static void group_sync_upd_work(struct work_struct *work) +{ + struct panthor_group *group = + container_of(work, struct panthor_group, sync_upd_work); + u32 queue_idx; + bool cookie; + + cookie = dma_fence_begin_signalling(); + for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { + struct panthor_queue *queue = group->queues[queue_idx]; + + if (!queue) + continue; + + queue_check_job_completion(queue); + } + dma_fence_end_signalling(cookie); + group_put(group); } @@ -3194,17 +3335,6 @@ queue_run_job(struct drm_sched_job *sched_job) queue->iface.input->insert = job->ringbuf.end; if (group->csg_id < 0) { - /* If the queue is blocked, we want to keep the timeout running, so we - * can detect unbounded waits and kill the group when that happens. 
- * Otherwise, we suspend the timeout so the time we spend waiting for - * a CSG slot is not counted. - */ - if (!(group->blocked_queues & BIT(job->queue_idx)) && - !queue->timeout_suspended) { - queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); - queue->timeout_suspended = true; - } - group_schedule_locked(group, BIT(job->queue_idx)); } else { gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1); @@ -3213,6 +3343,7 @@ queue_run_job(struct drm_sched_job *sched_job) pm_runtime_get(ptdev->base.dev); sched->pm.has_ref = true; } + queue_resume_timeout(queue); panthor_devfreq_record_busy(sched->ptdev); } @@ -3262,7 +3393,6 @@ queue_timedout_job(struct drm_sched_job *sched_job) mutex_unlock(&sched->lock); queue_start(queue); - return DRM_GPU_SCHED_STAT_RESET; } @@ -3305,11 +3435,23 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev, return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64)); } +static void queue_timeout_work(struct work_struct *work) +{ + struct panthor_queue *queue = container_of(work, struct panthor_queue, + timeout.work.work); + bool progress; + + progress = queue_check_job_completion(queue); + if (!progress) + drm_sched_fault(&queue->scheduler); +} + static struct panthor_queue * group_create_queue(struct panthor_group *group, - const struct drm_panthor_queue_create *args) + const struct drm_panthor_queue_create *args, + u64 drm_client_id, u32 gid, u32 qid) { - const struct drm_sched_init_args sched_args = { + struct drm_sched_init_args sched_args = { .ops = &panthor_queue_sched_ops, .submit_wq = group->ptdev->scheduler->wq, .num_rqs = 1, @@ -3320,9 +3462,8 @@ group_create_queue(struct panthor_group *group, * their profiling status. */ .credit_limit = args->ringbuf_size / sizeof(u64), - .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), + .timeout = MAX_SCHEDULE_TIMEOUT, .timeout_wq = group->ptdev->reset.wq, - .name = "panthor-queue", .dev = group->ptdev->base.dev, }; struct drm_gpu_scheduler *drm_sched; @@ -3343,6 +3484,8 @@ group_create_queue(struct panthor_group *group, if (!queue) return ERR_PTR(-ENOMEM); + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); + INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work); queue->fence_ctx.id = dma_fence_context_alloc(1); spin_lock_init(&queue->fence_ctx.lock); INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs); @@ -3397,12 +3540,23 @@ group_create_queue(struct panthor_group *group, if (ret) goto err_free_queue; + /* assign a unique name */ + queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid); + if (!queue->name) { + ret = -ENOMEM; + goto err_free_queue; + } + + sched_args.name = queue->name; + ret = drm_sched_init(&queue->scheduler, &sched_args); if (ret) goto err_free_queue; drm_sched = &queue->scheduler; ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL); + if (ret) + goto err_free_queue; return queue; @@ -3446,7 +3600,8 @@ static void add_group_kbo_sizes(struct panthor_device *ptdev, int panthor_group_create(struct panthor_file *pfile, const struct drm_panthor_group_create *group_args, - const struct drm_panthor_queue_create *queue_args) + const struct drm_panthor_queue_create *queue_args, + u64 drm_client_id) { struct panthor_device *ptdev = pfile->ptdev; struct panthor_group_pool *gpool = pfile->groups; @@ -3539,12 +3694,16 @@ int panthor_group_create(struct panthor_file *pfile, memset(group->syncobjs->kmap, 0, group_args->queues.count * sizeof(struct panthor_syncobj_64b)); + ret = 
xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); + if (ret) + goto err_put_group; + for (i = 0; i < group_args->queues.count; i++) { - group->queues[i] = group_create_queue(group, &queue_args[i]); + group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i); if (IS_ERR(group->queues[i])) { ret = PTR_ERR(group->queues[i]); group->queues[i] = NULL; - goto err_put_group; + goto err_erase_gid; } group->queue_count++; @@ -3552,10 +3711,6 @@ int panthor_group_create(struct panthor_file *pfile, group->idle_queues = GENMASK(group->queue_count - 1, 0); - ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); - if (ret) - goto err_put_group; - mutex_lock(&sched->reset.lock); if (atomic_read(&sched->reset.in_progress)) { panthor_group_stop(group); @@ -3574,6 +3729,9 @@ int panthor_group_create(struct panthor_file *pfile, return gid; +err_erase_gid: + xa_erase(&gpool->xa, gid); + err_put_group: group_put(group); return ret; @@ -3855,7 +4013,9 @@ void panthor_sched_unplug(struct panthor_device *ptdev) { struct panthor_scheduler *sched = ptdev->scheduler; - cancel_delayed_work_sync(&sched->tick_work); + disable_delayed_work_sync(&sched->tick_work); + disable_work_sync(&sched->fw_events_work); + disable_work_sync(&sched->sync_upd_work); mutex_lock(&sched->lock); if (sched->pm.has_ref) { @@ -3873,8 +4033,6 @@ static void panthor_sched_fini(struct drm_device *ddev, void *res) if (!sched || !sched->csg_slot_count) return; - cancel_delayed_work_sync(&sched->tick_work); - if (sched->wq) destroy_workqueue(sched->wq); diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h index 742b0b4ff3a3..f4a475aa34c0 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.h +++ b/drivers/gpu/drm/panthor/panthor_sched.h @@ -21,7 +21,8 @@ struct panthor_job; int panthor_group_create(struct panthor_file *pfile, const struct drm_panthor_group_create *group_args, - const struct drm_panthor_queue_create *queue_args); + const struct drm_panthor_queue_create *queue_args, + u64 drm_client_id); int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle); int panthor_group_get_state(struct panthor_file *pfile, struct drm_panthor_group_get_state *get_state); diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index b9fe926a49e8..3a9661b9b1fc 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -20,6 +20,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "pl111_drm.h" @@ -473,12 +474,15 @@ static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, return best_div; } -static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int pl111_clk_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - int div = pl111_clk_div_choose_div(hw, rate, prate, true); + int div = pl111_clk_div_choose_div(hw, req->rate, + &req->best_parent_rate, true); - return DIV_ROUND_UP_ULL(*prate, div); + req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div); + + return 0; } static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw, @@ -528,7 +532,7 @@ static int pl111_clk_div_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops pl111_clk_div_ops = { .recalc_rate = pl111_clk_div_recalc_rate, - .round_rate = 
pl111_clk_div_round_rate, + .determine_rate = pl111_clk_div_determine_rate, .set_rate = pl111_clk_div_set_rate, }; diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index d6ea01f3797b..2e3200db2f39 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -27,6 +27,7 @@ #include <linux/delay.h> +#include <drm/drm_print.h> #include <drm/drm_util.h> #include "qxl_drv.h" diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 2d9ed3b94574..b66b14b08b61 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -30,6 +30,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include "qxl_drv.h" #include "qxl_object.h" diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index ae7e572b1b4a..a134820aac58 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -34,9 +34,12 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #include "qxl_drv.h" #include "qxl_object.h" @@ -382,7 +385,25 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_device *dev = crtc->dev; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct drm_pending_vblank_event *event; + qxl_crtc_update_monitors_config(crtc, "flush"); + + spin_lock_irq(&dev->event_lock); + + event = crtc_state->event; + crtc_state->event = NULL; + + if (event) { + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + } + + spin_unlock_irq(&dev->event_lock); } static void qxl_crtc_destroy(struct drm_crtc *crtc) @@ -401,6 +422,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, @@ -455,11 +477,15 @@ static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { qxl_crtc_update_monitors_config(crtc, "enable"); + + drm_crtc_vblank_on(crtc); } static void qxl_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { + drm_crtc_vblank_off(crtc); + qxl_crtc_update_monitors_config(crtc, "disable"); } @@ -1276,6 +1302,10 @@ int qxl_modeset_init(struct qxl_device *qdev) qxl_display_read_client_monitors_config(qdev); + ret = drm_vblank_init(&qdev->ddev, qxl_num_crtc); + if (ret) + return ret; + drm_mode_config_reset(&qdev->ddev); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 417061ae59eb..2bbb1168a3ff 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -44,6 +44,7 @@ #include <drm/drm_module.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "qxl_object.h" diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index fc5e3763c359..4939b57a2a48 100644 --- 
a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -24,6 +24,7 @@ */ #include <drm/drm.h> +#include <drm/drm_print.h> #include "qxl_drv.h" #include "qxl_object.h" @@ -39,7 +40,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj) qxl_surface_evict(qdev, qobj, false); tbo = &qobj->tbo; - ttm_bo_put(tbo); + ttm_bo_fini(tbo); } int qxl_gem_object_create(struct qxl_device *qdev, int size, diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index ffff54e5fb31..3cc45997533d 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -26,6 +26,8 @@ #include <linux/gfp.h> #include <linux/slab.h> +#include <drm/drm_print.h> + #include "qxl_drv.h" #include "qxl_object.h" diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 506ae1f5e099..336cbff26089 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -26,6 +26,8 @@ #include <linux/pci.h> #include <linux/uaccess.h> +#include <drm/drm_print.h> + #include "qxl_drv.h" #include "qxl_object.h" diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index 665278ee3b6d..4018bcf808e5 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -26,6 +26,7 @@ #include <linux/pci.h> #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include "qxl_drv.h" diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index dc3828db1991..461b7ab9ad5c 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -28,6 +28,7 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "qxl_drv.h" diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 05204a6a3fa8..7b3c9a6016db 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -22,6 +22,8 @@ #include <linux/delay.h> +#include <drm/drm_print.h> + #include <trace/events/dma_fence.h> #include "qxl_drv.h" diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 765a144cea14..1a40590077dd 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -28,6 +28,7 @@ #include <drm/drm.h> #include <drm/drm_file.h> #include <drm/drm_debugfs.h> +#include <drm/drm_print.h> #include <drm/qxl_drm.h> #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> @@ -196,7 +197,7 @@ int qxl_ttm_init(struct qxl_device *qdev) r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL, qdev->ddev.anon_inode->i_mapping, qdev->ddev.vma_offset_manager, - false, false); + 0); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 63c47585afbc..527b9d19d730 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -80,6 +80,7 @@ #include <drm/drm_gem.h> #include <drm/drm_audio_component.h> #include <drm/drm_suballoc.h> +#include <drm/drm_print.h> #include "radeon_family.h" #include "radeon_mode.h" diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 22ce61bdfc06..08f8ba4fd148 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -408,7 +408,6 @@ static int radeon_atif_handler(struct radeon_device *rdev, pm_runtime_get_sync(rdev_to_drm(rdev)->dev); /* Just fire off a uevent and let userspace tell us what to do */ 
drm_helper_hpd_irq_event(rdev_to_drm(rdev)); - pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev); pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev); } } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 9f6a3df951ba..012d8b2295b8 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -875,10 +875,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1066,10 +1064,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1154,10 +1150,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); radeon_connector_update_scratch_regs(connector, ret); - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1402,10 +1396,8 @@ out: } exit: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1714,10 +1706,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) } out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 9e35b14e2bf0..60afaa8e56b4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1635,7 +1635,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, } if (notify_clients) - drm_client_dev_suspend(dev, false); + drm_client_dev_suspend(dev); return 0; } @@ -1739,7 +1739,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) radeon_pm_compute_clocks(rdev); if (notify_clients) - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 351b9dfcdad8..35fb99bcd9a7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -644,8 +644,6 @@ radeon_crtc_set_config(struct drm_mode_set *set, if (crtc->enabled) active = true; - pm_runtime_mark_last_busy(dev->dev); - rdev = dev->dev_private; /* if we have active crtcs and we don't have a power ref, take the current one */ diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 9c8907bc61d9..87fd6255c114 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -241,12 +241,12 @@ module_param_named(uvd, radeon_uvd, int, 0444); MODULE_PARM_DESC(vce, "vce enable/disable vce support (1 = enable, 0 = disable)"); module_param_named(vce, radeon_vce, int, 
0444); -int radeon_si_support = 1; -MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)"); +int radeon_si_support = -1; +MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)"); module_param_named(si_support, radeon_si_support, int, 0444); -int radeon_cik_support = 1; -MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)"); +int radeon_cik_support = -1; +MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)"); module_param_named(cik_support, radeon_cik_support, int, 0444); static const struct pci_device_id pciidlist[] = { @@ -256,12 +256,60 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static const struct drm_driver kms_driver; +static bool radeon_support_enabled(struct device *dev, + const enum radeon_family family) +{ + const char *gen; + int module_param = -1; + bool amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU); + bool support_by_default = true; + + switch (family) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + gen = "SI"; + module_param = radeon_si_support; + amdgpu_support_built &= IS_ENABLED(CONFIG_DRM_AMDGPU_SI); + support_by_default = false; + break; + + case CHIP_BONAIRE: + case CHIP_HAWAII: + support_by_default = false; + fallthrough; + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + gen = "CIK"; + module_param = radeon_cik_support; + amdgpu_support_built &= IS_ENABLED(CONFIG_DRM_AMDGPU_CIK); + break; + + default: + /* All other chips are supported by radeon only */ + return true; + } + + if ((module_param == -1 && (support_by_default || !amdgpu_support_built)) || + module_param == 1) + return true; + + if (!module_param) + dev_info(dev, "%s support disabled by module param\n", gen); + + return false; +} + static int radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long flags = 0; struct drm_device *ddev; struct radeon_device *rdev; + struct device *dev = &pdev->dev; const struct drm_format_info *format; int ret; @@ -270,30 +318,8 @@ static int radeon_pci_probe(struct pci_dev *pdev, flags = ent->driver_data; - if (!radeon_si_support) { - switch (flags & RADEON_FAMILY_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: - dev_info(&pdev->dev, - "SI support disabled by module param\n"); - return -ENODEV; - } - } - if (!radeon_cik_support) { - switch (flags & RADEON_FAMILY_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: - dev_info(&pdev->dev, - "CIK support disabled by module param\n"); - return -ENODEV; - } - } + if (!radeon_support_enabled(dev, flags & RADEON_FAMILY_MASK)) + return -ENODEV; if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; @@ -303,11 +329,11 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret) return ret; - rdev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*rdev), ddev); + rdev = devm_drm_dev_alloc(dev, &kms_driver, typeof(*rdev), ddev); if (IS_ERR(rdev)) return PTR_ERR(rdev); - rdev->dev = &pdev->dev; + rdev->dev = dev; rdev->pdev = pdev; ddev = rdev_to_drm(rdev); ddev->dev_private = rdev; @@ -461,7 +487,6 @@ static int radeon_pmops_runtime_idle(struct device *dev) } } - pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ return 1; @@ -483,7 +508,6 @@ long radeon_drm_ioctl(struct file *filp, ret = 
drm_ioctl(filp, cmd, arg); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index dc81b0c2dbff..fd083aaa91bb 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -154,7 +154,6 @@ static int radeon_fbdev_fb_open(struct fb_info *info, int user) return 0; err_pm_runtime_mark_last_busy: - pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev); pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev); return ret; } @@ -164,7 +163,6 @@ static int radeon_fbdev_fb_release(struct fb_info *info, int user) struct drm_fb_helper *fb_helper = info->par; struct radeon_device *rdev = fb_helper->dev->dev_private; - pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev); pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev); return 0; @@ -184,8 +182,6 @@ static void radeon_fbdev_fb_destroy(struct fb_info *info) radeon_fbdev_destroy_pinned_object(gobj); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops radeon_fbdev_fb_ops = { @@ -206,7 +202,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct radeon_device *rdev = fb_helper->dev->dev_private; const struct drm_format_info *format_info; struct drm_mode_fb_cmd2 mode_cmd = { }; - struct fb_info *info; + struct fb_info *info = fb_helper->info; struct drm_gem_object *gobj; struct radeon_bo *rbo; struct drm_framebuffer *fb; @@ -247,13 +243,6 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->funcs = &radeon_fbdev_fb_helper_funcs; fb_helper->fb = fb; - /* okay we have an object now allocate the framebuffer */ - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_framebuffer_unregister_private; - } - info->fbops = &radeon_fbdev_fb_ops; /* radeon resume is fragile and needs a vt switch to help it along */ @@ -279,10 +268,6 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, return 0; -err_drm_framebuffer_unregister_private: - fb_helper->fb = NULL; - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); err_kfree: kfree(fb); err_radeon_fbdev_destroy_pinned_object: diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 5b5b54e876d4..167d6f122b8e 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f) if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) return true; - if (down_read_trylock(&rdev->exclusive_lock)) { - radeon_fence_process(rdev, ring); - up_read(&rdev->exclusive_lock); - - if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) - return true; - } return false; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index f86773f3db20..18ca1bcfd2f9 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -86,7 +86,7 @@ static void radeon_gem_object_free(struct drm_gem_object *gobj) if (robj) { radeon_mn_unregister(robj); - ttm_bo_put(&robj->tbo); + ttm_bo_fini(&robj->tbo); } } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index ba1446acd703..7cbe02ffb193 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -169,7 +169,6 @@ int radeon_driver_load_kms(struct 
drm_device *dev, unsigned long flags) pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); pm_runtime_allow(dev->dev); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); } @@ -676,7 +675,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = fpriv; } - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -686,7 +684,6 @@ err_fpriv: kfree(fpriv); err_suspend: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; } @@ -736,7 +733,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev, kfree(fpriv); file_priv->driver_priv = NULL; } - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 616d25c8c2de..695ac32f7535 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -683,8 +683,10 @@ int radeon_ttm_init(struct radeon_device *rdev) r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev, rdev_to_drm(rdev)->anon_inode->i_mapping, rdev_to_drm(rdev)->vma_offset_manager, - rdev->need_swiotlb, - dma_addressing_limited(&rdev->pdev->dev)); + (rdev->need_swiotlb ? + TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) | + (dma_addressing_limited(&rdev->pdev->dev) ? + TTM_ALLOCATION_POOL_USE_DMA32 : 0)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c index 7e175dbfd892..2e2906ab750b 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c @@ -17,6 +17,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "rcar_cmm.h" @@ -993,7 +994,7 @@ static void rcar_du_crtc_cleanup(struct drm_crtc *crtc) rcar_du_crtc_crc_cleanup(rcrtc); - return drm_crtc_cleanup(crtc); + drm_crtc_cleanup(crtc); } static void rcar_du_crtc_reset(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c index d948ff3594c4..031d07f4508e 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c @@ -24,6 +24,7 @@ #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "rcar_du_drv.h" diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c index 216219accfd9..6294443f6068 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c @@ -11,6 +11,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -407,8 +408,8 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { struct rcar_du_device *rcdu = to_rcar_du_device(dev); - unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); unsigned int align; + int ret; /* * The R8A7779 DU requires a 16 pixels pitch alignment as documented, @@ -419,7 +420,9 @@ int 
rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, else align = 16 * args->bpp / 8; - args->pitch = roundup(min_pitch, align); + ret = drm_mode_size_dumb(dev, args, align, 0); + if (ret) + return ret; return drm_gem_dma_dumb_create_internal(file, dev, args); } diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c index 5c73a513f678..9413b76d0bfc 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c @@ -5,6 +5,7 @@ * Copyright (C) 2020 Renesas Electronics Corporation */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> @@ -71,6 +72,7 @@ struct rcar_mipi_dsi { } clocks; enum mipi_dsi_pixel_format format; + unsigned long mode_flags; unsigned int num_data_lanes; unsigned int lanes; }; @@ -316,8 +318,8 @@ rcar_mipi_dsi_post_init_phtw_v4h(struct rcar_mipi_dsi *dsi, WRITE_PHTW(0x01020100, 0x00000180); ret = read_poll_timeout(rcar_mipi_dsi_read, status, - status & PHTR_TEST, 2000, 10000, false, - dsi, PHTR); + status & PHTR_TESTDOUT_TEST, + 2000, 10000, false, dsi, PHTR); if (ret < 0) { dev_err(dsi->dev, "failed to test PHTR\n"); return ret; @@ -457,29 +459,43 @@ static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi, u32 vprmset4r; /* Configuration for Pixel Stream and Packet Header */ - if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 24) + switch (mipi_dsi_pixel_format_to_bpp(dsi->format)) { + case 24: rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB24); - else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 18) + break; + case 18: rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB18); - else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 16) + break; + case 16: rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB16); - else { + break; + default: dev_warn(dsi->dev, "unsupported format"); return; } /* Configuration for Blanking sequence and Input Pixel */ - setr = TXVMSETR_HSABPEN_EN | TXVMSETR_HBPBPEN_EN - | TXVMSETR_HFPBPEN_EN | TXVMSETR_SYNSEQ_PULSES - | TXVMSETR_PIXWDTH | TXVMSETR_VSTPM; + setr = TXVMSETR_PIXWDTH | TXVMSETR_VSTPM; + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { + if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) + setr |= TXVMSETR_SYNSEQ_EVENTS; + if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)) + setr |= TXVMSETR_HFPBPEN; + if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)) + setr |= TXVMSETR_HBPBPEN; + if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)) + setr |= TXVMSETR_HSABPEN; + } + rcar_mipi_dsi_write(dsi, TXVMSETR, setr); - /* Configuration for Video Parameters */ - vprmset0r = (mode->flags & DRM_MODE_FLAG_PVSYNC ? - TXVMVPRMSET0R_VSPOL_HIG : TXVMVPRMSET0R_VSPOL_LOW) - | (mode->flags & DRM_MODE_FLAG_PHSYNC ? 
- TXVMVPRMSET0R_HSPOL_HIG : TXVMVPRMSET0R_HSPOL_LOW) - | TXVMVPRMSET0R_CSPC_RGB | TXVMVPRMSET0R_BPP_24; + /* Configuration for Video Parameters, input is always RGB888 */ + vprmset0r = TXVMVPRMSET0R_BPP_24; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + vprmset0r |= TXVMVPRMSET0R_VSPOL_LOW; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + vprmset0r |= TXVMVPRMSET0R_HSPOL_LOW; vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay) | TXVMVPRMSET1R_VSA(mode->vsync_end - mode->vsync_start); @@ -620,6 +636,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, vclkset = VCLKSET_CKEN; rcar_mipi_dsi_write(dsi, VCLKSET, vclkset); + /* Output is always RGB, never YCbCr */ if (dsi_format == 24) vclkset |= VCLKSET_BPP_24; else if (dsi_format == 18) @@ -631,7 +648,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, return -EINVAL; } - vclkset |= VCLKSET_COLOR_RGB | VCLKSET_LANE(dsi->lanes - 1); + vclkset |= VCLKSET_LANE(dsi->lanes - 1); switch (dsi->info->model) { case RCAR_DSI_V3U: @@ -911,6 +928,7 @@ static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host, dsi->lanes = device->lanes; dsi->format = device->format; + dsi->mode_flags = device->mode_flags; dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, 1, 0); diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h index 76521276e2af..b6fb58c2f9f6 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h @@ -9,292 +9,311 @@ #define __RCAR_MIPI_DSI_REGS_H__ #define LINKSR 0x010 -#define LINKSR_LPBUSY (1 << 1) -#define LINKSR_HSBUSY (1 << 0) +#define LINKSR_LPBUSY BIT_U32(1) +#define LINKSR_HSBUSY BIT_U32(0) #define TXSETR 0x100 -#define TXSETR_LANECNT_MASK (0x3 << 0) +#define TXSETR_LANECNT_MASK GENMASK_U32(1, 0) /* * DSI Command Transfer Registers */ #define TXCMSETR 0x110 -#define TXCMSETR_SPDTYP (1 << 8) /* 0:HS 1:LP */ -#define TXCMSETR_LPPDACC (1 << 0) +#define TXCMSETR_SPDTYP BIT_U32(8) /* 0:HS 1:LP */ +#define TXCMSETR_LPPDACC BIT_U32(0) #define TXCMCR 0x120 -#define TXCMCR_BTATYP (1 << 2) -#define TXCMCR_BTAREQ (1 << 1) -#define TXCMCR_TXREQ (1 << 0) +#define TXCMCR_BTATYP BIT_U32(2) +#define TXCMCR_BTAREQ BIT_U32(1) +#define TXCMCR_TXREQ BIT_U32(0) #define TXCMSR 0x130 -#define TXCMSR_CLSNERR (1 << 18) -#define TXCMSR_AXIERR (1 << 16) -#define TXCMSR_TXREQEND (1 << 0) +#define TXCMSR_CLSNERR BIT_U32(18) +#define TXCMSR_AXIERR BIT_U32(16) +#define TXCMSR_TXREQEND BIT_U32(0) #define TXCMSCR 0x134 -#define TXCMSCR_CLSNERR (1 << 18) -#define TXCMSCR_AXIERR (1 << 16) -#define TXCMSCR_TXREQEND (1 << 0) +#define TXCMSCR_CLSNERR BIT_U32(18) +#define TXCMSCR_AXIERR BIT_U32(16) +#define TXCMSCR_TXREQEND BIT_U32(0) #define TXCMIER 0x138 -#define TXCMIER_CLSNERR (1 << 18) -#define TXCMIER_AXIERR (1 << 16) -#define TXCMIER_TXREQEND (1 << 0) +#define TXCMIER_CLSNERR BIT_U32(18) +#define TXCMIER_AXIERR BIT_U32(16) +#define TXCMIER_TXREQEND BIT_U32(0) #define TXCMADDRSET0R 0x140 #define TXCMPHDR 0x150 -#define TXCMPHDR_FMT (1 << 24) /* 0:SP 1:LP */ -#define TXCMPHDR_VC(n) (((n) & 0x3) << 22) -#define TXCMPHDR_DT(n) (((n) & 0x3f) << 16) -#define TXCMPHDR_DATA1(n) (((n) & 0xff) << 8) -#define TXCMPHDR_DATA0(n) (((n) & 0xff) << 0) +#define TXCMPHDR_FMT BIT_U32(24) /* 0:SP 1:LP */ +#define TXCMPHDR_VC_MASK GENMASK_U32(23, 22) +#define TXCMPHDR_VC(n) FIELD_PREP(TXCMPHDR_VC_MASK, (n)) +#define TXCMPHDR_DT_MASK GENMASK_U32(21, 16) +#define TXCMPHDR_DT(n) FIELD_PREP(TXCMPHDR_DT_MASK, (n)) +#define 
TXCMPHDR_DATA1_MASK GENMASK_U32(15, 8) +#define TXCMPHDR_DATA1(n) FIELD_PREP(TXCMPHDR_DATA1_MASK, (n)) +#define TXCMPHDR_DATA0_MASK GENMASK_U32(7, 0) +#define TXCMPHDR_DATA0(n) FIELD_PREP(TXCMPHDR_DATA0_MASK, (n)) #define TXCMPPD0R 0x160 #define TXCMPPD1R 0x164 #define TXCMPPD2R 0x168 #define TXCMPPD3R 0x16c #define RXSETR 0x200 -#define RXSETR_CRCEN (((n) & 0xf) << 24) -#define RXSETR_ECCEN (((n) & 0xf) << 16) +#define RXSETR_CRCEN_MASK GENMASK_U32(27, 24) +#define RXSETR_ECCEN_MASK GENMASK_U32(19, 16) #define RXPSETR 0x210 -#define RXPSETR_LPPDACC (1 << 0) +#define RXPSETR_LPPDACC BIT_U32(0) #define RXPSR 0x220 -#define RXPSR_ECCERR1B (1 << 28) -#define RXPSR_UEXTRGERR (1 << 25) -#define RXPSR_RESPTOERR (1 << 24) -#define RXPSR_OVRERR (1 << 23) -#define RXPSR_AXIERR (1 << 22) -#define RXPSR_CRCERR (1 << 21) -#define RXPSR_WCERR (1 << 20) -#define RXPSR_UEXDTERR (1 << 19) -#define RXPSR_UEXPKTERR (1 << 18) -#define RXPSR_ECCERR (1 << 17) -#define RXPSR_MLFERR (1 << 16) -#define RXPSR_RCVACK (1 << 14) -#define RXPSR_RCVEOT (1 << 10) -#define RXPSR_RCVAKE (1 << 9) -#define RXPSR_RCVRESP (1 << 8) -#define RXPSR_BTAREQEND (1 << 0) +#define RXPSR_ECCERR1B BIT_U32(28) +#define RXPSR_UEXTRGERR BIT_U32(25) +#define RXPSR_RESPTOERR BIT_U32(24) +#define RXPSR_OVRERR BIT_U32(23) +#define RXPSR_AXIERR BIT_U32(22) +#define RXPSR_CRCERR BIT_U32(21) +#define RXPSR_WCERR BIT_U32(20) +#define RXPSR_UEXDTERR BIT_U32(19) +#define RXPSR_UEXPKTERR BIT_U32(18) +#define RXPSR_ECCERR BIT_U32(17) +#define RXPSR_MLFERR BIT_U32(16) +#define RXPSR_RCVACK BIT_U32(14) +#define RXPSR_RCVEOT BIT_U32(10) +#define RXPSR_RCVAKE BIT_U32(9) +#define RXPSR_RCVRESP BIT_U32(8) +#define RXPSR_BTAREQEND BIT_U32(0) #define RXPSCR 0x224 -#define RXPSCR_ECCERR1B (1 << 28) -#define RXPSCR_UEXTRGERR (1 << 25) -#define RXPSCR_RESPTOERR (1 << 24) -#define RXPSCR_OVRERR (1 << 23) -#define RXPSCR_AXIERR (1 << 22) -#define RXPSCR_CRCERR (1 << 21) -#define RXPSCR_WCERR (1 << 20) -#define RXPSCR_UEXDTERR (1 << 19) -#define RXPSCR_UEXPKTERR (1 << 18) -#define RXPSCR_ECCERR (1 << 17) -#define RXPSCR_MLFERR (1 << 16) -#define RXPSCR_RCVACK (1 << 14) -#define RXPSCR_RCVEOT (1 << 10) -#define RXPSCR_RCVAKE (1 << 9) -#define RXPSCR_RCVRESP (1 << 8) -#define RXPSCR_BTAREQEND (1 << 0) +#define RXPSCR_ECCERR1B BIT_U32(28) +#define RXPSCR_UEXTRGERR BIT_U32(25) +#define RXPSCR_RESPTOERR BIT_U32(24) +#define RXPSCR_OVRERR BIT_U32(23) +#define RXPSCR_AXIERR BIT_U32(22) +#define RXPSCR_CRCERR BIT_U32(21) +#define RXPSCR_WCERR BIT_U32(20) +#define RXPSCR_UEXDTERR BIT_U32(19) +#define RXPSCR_UEXPKTERR BIT_U32(18) +#define RXPSCR_ECCERR BIT_U32(17) +#define RXPSCR_MLFERR BIT_U32(16) +#define RXPSCR_RCVACK BIT_U32(14) +#define RXPSCR_RCVEOT BIT_U32(10) +#define RXPSCR_RCVAKE BIT_U32(9) +#define RXPSCR_RCVRESP BIT_U32(8) +#define RXPSCR_BTAREQEND BIT_U32(0) #define RXPIER 0x228 -#define RXPIER_ECCERR1B (1 << 28) -#define RXPIER_UEXTRGERR (1 << 25) -#define RXPIER_RESPTOERR (1 << 24) -#define RXPIER_OVRERR (1 << 23) -#define RXPIER_AXIERR (1 << 22) -#define RXPIER_CRCERR (1 << 21) -#define RXPIER_WCERR (1 << 20) -#define RXPIER_UEXDTERR (1 << 19) -#define RXPIER_UEXPKTERR (1 << 18) -#define RXPIER_ECCERR (1 << 17) -#define RXPIER_MLFERR (1 << 16) -#define RXPIER_RCVACK (1 << 14) -#define RXPIER_RCVEOT (1 << 10) -#define RXPIER_RCVAKE (1 << 9) -#define RXPIER_RCVRESP (1 << 8) -#define RXPIER_BTAREQEND (1 << 0) +#define RXPIER_ECCERR1B BIT_U32(28) +#define RXPIER_UEXTRGERR BIT_U32(25) +#define RXPIER_RESPTOERR BIT_U32(24) +#define RXPIER_OVRERR BIT_U32(23) 
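The register header conversion above and below trades open-coded (1 << n) and (((v) & m) << s) expressions for BIT_U32(), GENMASK_U32() and FIELD_PREP(), so a field's position and width are stated once, in its *_MASK definition. A minimal sketch of the encode side, using hypothetical DEMO_* names (not the driver's real registers) and only the generic <linux/bits.h>/<linux/bitfield.h> helpers:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_VC_MASK	GENMASK(23, 22)		/* 2-bit virtual-channel field */
#define DEMO_DT_MASK	GENMASK(21, 16)		/* 6-bit data-type field */

static inline u32 demo_pack_header(u32 vc, u32 dt)
{
	/* Equivalent to the old (((vc) & 0x3) << 22) | (((dt) & 0x3f) << 16) */
	return FIELD_PREP(DEMO_VC_MASK, vc) | FIELD_PREP(DEMO_DT_MASK, dt);
}

FIELD_PREP() derives the shift from the mask, so a mismatched shift or width cannot creep in when a field moves.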
+#define RXPIER_AXIERR BIT_U32(22) +#define RXPIER_CRCERR BIT_U32(21) +#define RXPIER_WCERR BIT_U32(20) +#define RXPIER_UEXDTERR BIT_U32(19) +#define RXPIER_UEXPKTERR BIT_U32(18) +#define RXPIER_ECCERR BIT_U32(17) +#define RXPIER_MLFERR BIT_U32(16) +#define RXPIER_RCVACK BIT_U32(14) +#define RXPIER_RCVEOT BIT_U32(10) +#define RXPIER_RCVAKE BIT_U32(9) +#define RXPIER_RCVRESP BIT_U32(8) +#define RXPIER_BTAREQEND BIT_U32(0) #define RXPADDRSET0R 0x230 #define RXPSIZESETR 0x238 -#define RXPSIZESETR_SIZE(n) (((n) & 0xf) << 3) +#define RXPSIZESETR_SIZE_MASK GENMASK_U32(6, 3) #define RXPHDR 0x240 -#define RXPHDR_FMT (1 << 24) /* 0:SP 1:LP */ -#define RXPHDR_VC(n) (((n) & 0x3) << 22) -#define RXPHDR_DT(n) (((n) & 0x3f) << 16) -#define RXPHDR_DATA1(n) (((n) & 0xff) << 8) -#define RXPHDR_DATA0(n) (((n) & 0xff) << 0) +#define RXPHDR_FMT BIT_U32(24) /* 0:SP 1:LP */ +#define RXPHDR_VC_MASK GENMASK_U32(23, 22) +#define RXPHDR_DT_MASK GENMASK_U32(21, 16) +#define RXPHDR_DATA1_MASK GENMASK_U32(15, 8) +#define RXPHDR_DATA0_MASK GENMASK_U32(7, 0) #define RXPPD0R 0x250 #define RXPPD1R 0x254 #define RXPPD2R 0x258 #define RXPPD3R 0x25c #define AKEPR 0x300 -#define AKEPR_VC(n) (((n) & 0x3) << 22) -#define AKEPR_DT(n) (((n) & 0x3f) << 16) -#define AKEPR_ERRRPT(n) (((n) & 0xffff) << 0) +#define AKEPR_VC_MASK GENMASK_U32(23, 22) +#define AKEPR_DT_MASK GENMASK_U32(21, 16) +#define AKEPR_ERRRPT_MASK GENMASK_U32(15, 0) #define RXRESPTOSETR 0x400 #define TACR 0x500 #define TASR 0x510 #define TASCR 0x514 #define TAIER 0x518 #define TOSR 0x610 -#define TOSR_TATO (1 << 2) -#define TOSR_LRXHTO (1 << 1) -#define TOSR_HRXTO (1 << 0) +#define TOSR_TATO BIT_U32(2) +#define TOSR_LRXHTO BIT_U32(1) +#define TOSR_HRXTO BIT_U32(0) #define TOSCR 0x614 -#define TOSCR_TATO (1 << 2) -#define TOSCR_LRXHTO (1 << 1) -#define TOSCR_HRXTO (1 << 0) +#define TOSCR_TATO BIT_U32(2) +#define TOSCR_LRXHTO BIT_U32(1) +#define TOSCR_HRXTO BIT_U32(0) /* * Video Mode Register */ #define TXVMSETR 0x180 -#define TXVMSETR_SYNSEQ_PULSES (0 << 16) -#define TXVMSETR_SYNSEQ_EVENTS (1 << 16) -#define TXVMSETR_VSTPM (1 << 15) -#define TXVMSETR_PIXWDTH (1 << 8) -#define TXVMSETR_VSEN_EN (1 << 4) -#define TXVMSETR_VSEN_DIS (0 << 4) -#define TXVMSETR_HFPBPEN_EN (1 << 2) -#define TXVMSETR_HFPBPEN_DIS (0 << 2) -#define TXVMSETR_HBPBPEN_EN (1 << 1) -#define TXVMSETR_HBPBPEN_DIS (0 << 1) -#define TXVMSETR_HSABPEN_EN (1 << 0) -#define TXVMSETR_HSABPEN_DIS (0 << 0) +#define TXVMSETR_SYNSEQ_EVENTS BIT_U32(16) /* 0:Pulses 1:Events */ +#define TXVMSETR_VSTPM BIT_U32(15) +#define TXVMSETR_PIXWDTH_MASK GENMASK_U32(10, 8) +#define TXVMSETR_PIXWDTH BIT_U32(8) /* Only allowed value */ +#define TXVMSETR_VSEN BIT_U32(4) +#define TXVMSETR_HFPBPEN BIT_U32(2) +#define TXVMSETR_HBPBPEN BIT_U32(1) +#define TXVMSETR_HSABPEN BIT_U32(0) #define TXVMCR 0x190 -#define TXVMCR_VFCLR (1 << 12) -#define TXVMCR_EN_VIDEO (1 << 0) +#define TXVMCR_VFCLR BIT_U32(12) +#define TXVMCR_EN_VIDEO BIT_U32(0) #define TXVMSR 0x1a0 -#define TXVMSR_STR (1 << 16) -#define TXVMSR_VFRDY (1 << 12) -#define TXVMSR_ACT (1 << 8) -#define TXVMSR_RDY (1 << 0) +#define TXVMSR_STR BIT_U32(16) +#define TXVMSR_VFRDY BIT_U32(12) +#define TXVMSR_ACT BIT_U32(8) +#define TXVMSR_RDY BIT_U32(0) #define TXVMSCR 0x1a4 -#define TXVMSCR_STR (1 << 16) +#define TXVMSCR_STR BIT_U32(16) #define TXVMPSPHSETR 0x1c0 -#define TXVMPSPHSETR_DT_RGB16 (0x0e << 16) -#define TXVMPSPHSETR_DT_RGB18 (0x1e << 16) -#define TXVMPSPHSETR_DT_RGB18_LS (0x2e << 16) -#define TXVMPSPHSETR_DT_RGB24 (0x3e << 16) -#define TXVMPSPHSETR_DT_YCBCR16 (0x2c << 16) 
+#define TXVMPSPHSETR_DT_MASK (0x3f << 16) +#define TXVMPSPHSETR_DT_RGB16 FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x0e) +#define TXVMPSPHSETR_DT_RGB18 FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x1e) +#define TXVMPSPHSETR_DT_RGB18_LS FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x2e) +#define TXVMPSPHSETR_DT_RGB24 FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x3e) +#define TXVMPSPHSETR_DT_YCBCR16 FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x2c) #define TXVMVPRMSET0R 0x1d0 -#define TXVMVPRMSET0R_HSPOL_HIG (0 << 17) -#define TXVMVPRMSET0R_HSPOL_LOW (1 << 17) -#define TXVMVPRMSET0R_VSPOL_HIG (0 << 16) -#define TXVMVPRMSET0R_VSPOL_LOW (1 << 16) -#define TXVMVPRMSET0R_CSPC_RGB (0 << 4) -#define TXVMVPRMSET0R_CSPC_YCbCr (1 << 4) -#define TXVMVPRMSET0R_BPP_16 (0 << 0) -#define TXVMVPRMSET0R_BPP_18 (1 << 0) -#define TXVMVPRMSET0R_BPP_24 (2 << 0) +#define TXVMVPRMSET0R_HSPOL_LOW BIT_U32(17) /* 0:High 1:Low */ +#define TXVMVPRMSET0R_VSPOL_LOW BIT_U32(16) /* 0:High 1:Low */ +#define TXVMVPRMSET0R_CSPC_YCbCr BIT_U32(4) /* 0:RGB 1:YCbCr */ +#define TXVMVPRMSET0R_BPP_MASK GENMASK_U32(2, 0) +#define TXVMVPRMSET0R_BPP_16 FIELD_PREP(TXVMVPRMSET0R_BPP_MASK, 0) +#define TXVMVPRMSET0R_BPP_18 FIELD_PREP(TXVMVPRMSET0R_BPP_MASK, 1) +#define TXVMVPRMSET0R_BPP_24 FIELD_PREP(TXVMVPRMSET0R_BPP_MASK, 2) #define TXVMVPRMSET1R 0x1d4 -#define TXVMVPRMSET1R_VACTIVE(x) (((x) & 0x7fff) << 16) -#define TXVMVPRMSET1R_VSA(x) (((x) & 0xfff) << 0) +#define TXVMVPRMSET1R_VACTIVE_MASK GENMASK_U32(30, 16) +#define TXVMVPRMSET1R_VACTIVE(n) FIELD_PREP(TXVMVPRMSET1R_VACTIVE_MASK, (n)) +#define TXVMVPRMSET1R_VSA_MASK GENMASK_U32(11, 0) +#define TXVMVPRMSET1R_VSA(n) FIELD_PREP(TXVMVPRMSET1R_VSA_MASK, (n)) #define TXVMVPRMSET2R 0x1d8 -#define TXVMVPRMSET2R_VFP(x) (((x) & 0x1fff) << 16) -#define TXVMVPRMSET2R_VBP(x) (((x) & 0x1fff) << 0) +#define TXVMVPRMSET2R_VFP_MASK GENMASK_U32(28, 16) +#define TXVMVPRMSET2R_VFP(n) FIELD_PREP(TXVMVPRMSET2R_VFP_MASK, (n)) +#define TXVMVPRMSET2R_VBP_MASK GENMASK_U32(12, 0) +#define TXVMVPRMSET2R_VBP(n) FIELD_PREP(TXVMVPRMSET2R_VBP_MASK, (n)) #define TXVMVPRMSET3R 0x1dc -#define TXVMVPRMSET3R_HACTIVE(x) (((x) & 0x7fff) << 16) -#define TXVMVPRMSET3R_HSA(x) (((x) & 0xfff) << 0) +#define TXVMVPRMSET3R_HACTIVE_MASK GENMASK_U32(30, 16) +#define TXVMVPRMSET3R_HACTIVE(n) FIELD_PREP(TXVMVPRMSET3R_HACTIVE_MASK, (n)) +#define TXVMVPRMSET3R_HSA_MASK GENMASK_U32(11, 0) +#define TXVMVPRMSET3R_HSA(n) FIELD_PREP(TXVMVPRMSET3R_HSA_MASK, (n)) #define TXVMVPRMSET4R 0x1e0 -#define TXVMVPRMSET4R_HFP(x) (((x) & 0x1fff) << 16) -#define TXVMVPRMSET4R_HBP(x) (((x) & 0x1fff) << 0) +#define TXVMVPRMSET4R_HFP_MASK GENMASK_U32(28, 16) +#define TXVMVPRMSET4R_HFP(n) FIELD_PREP(TXVMVPRMSET4R_HFP_MASK, (n)) +#define TXVMVPRMSET4R_HBP_MASK GENMASK_U32(12, 0) +#define TXVMVPRMSET4R_HBP(n) FIELD_PREP(TXVMVPRMSET4R_HBP_MASK, (n)) /* * PHY-Protocol Interface (PPI) Registers */ #define PPISETR 0x700 -#define PPISETR_DLEN_MASK (0xf << 0) -#define PPISETR_CLEN (1 << 8) +#define PPISETR_DLEN_MASK GENMASK_U32(3, 0) +#define PPISETR_CLEN BIT_U32(8) #define PPICLCR 0x710 -#define PPICLCR_TXREQHS (1 << 8) -#define PPICLCR_TXULPSEXT (1 << 1) -#define PPICLCR_TXULPSCLK (1 << 0) +#define PPICLCR_TXREQHS BIT_U32(8) +#define PPICLCR_TXULPSEXT BIT_U32(1) +#define PPICLCR_TXULPSCLK BIT_U32(0) #define PPICLSR 0x720 -#define PPICLSR_HSTOLP (1 << 27) -#define PPICLSR_TOHS (1 << 26) -#define PPICLSR_STPST (1 << 0) +#define PPICLSR_HSTOLP BIT_U32(27) +#define PPICLSR_TOHS BIT_U32(26) +#define PPICLSR_STPST BIT_U32(0) #define PPICLSCR 0x724 -#define PPICLSCR_HSTOLP (1 << 27) -#define PPICLSCR_TOHS (1 << 26) 
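On the receive side the value-packing macros (RXPHDR_VC(n), AKEPR_ERRRPT(n) and similar) are reduced to bare *_MASK definitions, which suggests callers now extract fields with FIELD_GET(). A short illustration with hypothetical names, assuming the same <linux/bitfield.h> helper:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_RX_DT_MASK		GENMASK(21, 16)
#define DEMO_RX_DATA0_MASK	GENMASK(7, 0)

/* Decode two fields from a received packet-header word. */
static inline void demo_decode_rx_header(u32 hdr, u8 *dt, u8 *data0)
{
	*dt = FIELD_GET(DEMO_RX_DT_MASK, hdr);
	*data0 = FIELD_GET(DEMO_RX_DATA0_MASK, hdr);
}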
+#define PPICLSCR_HSTOLP BIT_U32(27) +#define PPICLSCR_TOHS BIT_U32(26) #define PPIDL0SR 0x740 -#define PPIDL0SR_DIR (1 << 10) -#define PPIDL0SR_STPST (1 << 6) +#define PPIDL0SR_DIR BIT_U32(10) +#define PPIDL0SR_STPST BIT_U32(6) #define PPIDLSR 0x760 -#define PPIDLSR_STPST (0xf << 0) +#define PPIDLSR_STPST GENMASK_U32(3, 0) /* * Clocks registers */ #define LPCLKSET 0x1000 -#define LPCLKSET_CKEN (1 << 8) -#define LPCLKSET_LPCLKDIV(x) (((x) & 0x3f) << 0) +#define LPCLKSET_CKEN BIT_U32(8) +#define LPCLKSET_LPCLKDIV_MASK GENMASK_U32(5, 0) #define CFGCLKSET 0x1004 -#define CFGCLKSET_CKEN (1 << 8) -#define CFGCLKSET_CFGCLKDIV(x) (((x) & 0x3f) << 0) +#define CFGCLKSET_CKEN BIT_U32(8) +#define CFGCLKSET_CFGCLKDIV_MASK GENMASK_U32(5, 0) #define DOTCLKDIV 0x1008 -#define DOTCLKDIV_CKEN (1 << 8) -#define DOTCLKDIV_DOTCLKDIV(x) (((x) & 0x3f) << 0) +#define DOTCLKDIV_CKEN BIT_U32(8) +#define DOTCLKDIV_DOTCLKDIV_MASK GENMASK_U32(5, 0) #define VCLKSET 0x100c -#define VCLKSET_CKEN (1 << 16) -#define VCLKSET_COLOR_RGB (0 << 8) -#define VCLKSET_COLOR_YCC (1 << 8) -#define VCLKSET_DIV_V3U(x) (((x) & 0x3) << 4) -#define VCLKSET_DIV_V4H(x) (((x) & 0x7) << 4) -#define VCLKSET_BPP_16 (0 << 2) -#define VCLKSET_BPP_18 (1 << 2) -#define VCLKSET_BPP_18L (2 << 2) -#define VCLKSET_BPP_24 (3 << 2) -#define VCLKSET_LANE(x) (((x) & 0x3) << 0) +#define VCLKSET_CKEN BIT_U32(16) +#define VCLKSET_COLOR_YCC BIT_U32(8) /* 0:RGB 1:YCbCr */ +#define VCLKSET_DIV_V3U_MASK GENMASK_U32(5, 4) +#define VCLKSET_DIV_V3U(n) FIELD_PREP(VCLKSET_DIV_V3U_MASK, (n)) +#define VCLKSET_DIV_V4H_MASK GENMASK_U32(6, 4) +#define VCLKSET_DIV_V4H(n) FIELD_PREP(VCLKSET_DIV_V4H_MASK, (n)) +#define VCLKSET_BPP_MASK GENMASK_U32(3, 2) +#define VCLKSET_BPP_16 FIELD_PREP(VCLKSET_BPP_MASK, 0) +#define VCLKSET_BPP_18 FIELD_PREP(VCLKSET_BPP_MASK, 1) +#define VCLKSET_BPP_18L FIELD_PREP(VCLKSET_BPP_MASK, 2) +#define VCLKSET_BPP_24 FIELD_PREP(VCLKSET_BPP_MASK, 3) +#define VCLKSET_LANE_MASK GENMASK_U32(1, 0) +#define VCLKSET_LANE(n) FIELD_PREP(VCLKSET_LANE_MASK, (n)) #define VCLKEN 0x1010 -#define VCLKEN_CKEN (1 << 0) +#define VCLKEN_CKEN BIT_U32(0) #define PHYSETUP 0x1014 -#define PHYSETUP_HSFREQRANGE(x) (((x) & 0x7f) << 16) -#define PHYSETUP_HSFREQRANGE_MASK (0x7f << 16) -#define PHYSETUP_CFGCLKFREQRANGE(x) (((x) & 0x3f) << 8) -#define PHYSETUP_SHUTDOWNZ (1 << 1) -#define PHYSETUP_RSTZ (1 << 0) +#define PHYSETUP_HSFREQRANGE_MASK GENMASK_U32(22, 16) +#define PHYSETUP_HSFREQRANGE(n) FIELD_PREP(PHYSETUP_HSFREQRANGE_MASK, (n)) +#define PHYSETUP_CFGCLKFREQRANGE_MASK GENMASK_U32(13, 8) +#define PHYSETUP_SHUTDOWNZ BIT_U32(1) +#define PHYSETUP_RSTZ BIT_U32(0) #define CLOCKSET1 0x101c -#define CLOCKSET1_LOCK_PHY (1 << 17) -#define CLOCKSET1_CLKSEL (1 << 8) -#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2) -#define CLOCKSET1_CLKINSEL_DIG (1 << 2) -#define CLOCKSET1_CLKINSEL_DU (1 << 3) -#define CLOCKSET1_SHADOW_CLEAR (1 << 1) -#define CLOCKSET1_UPDATEPLL (1 << 0) +#define CLOCKSET1_LOCK_PHY BIT_U32(17) +#define CLOCKSET1_CLKSEL BIT_U32(8) +#define CLOCKSET1_CLKINSEL_MASK GENMASK_U32(3, 2) +#define CLOCKSET1_CLKINSEL_EXTAL FIELD_PREP(CLOCKSET1_CLKINSEL_MASK, 0) +#define CLOCKSET1_CLKINSEL_DIG FIELD_PREP(CLOCKSET1_CLKINSEL_MASK, 1) +#define CLOCKSET1_CLKINSEL_DU FIELD_PREP(CLOCKSET1_CLKINSEL_MASK, 2) +#define CLOCKSET1_SHADOW_CLEAR BIT_U32(1) +#define CLOCKSET1_UPDATEPLL BIT_U32(0) #define CLOCKSET2 0x1020 -#define CLOCKSET2_M(x) (((x) & 0xfff) << 16) -#define CLOCKSET2_VCO_CNTRL(x) (((x) & 0x3f) << 8) -#define CLOCKSET2_N(x) (((x) & 0xf) << 0) +#define CLOCKSET2_M_MASK 
GENMASK_U32(27, 16) +#define CLOCKSET2_M(n) FIELD_PREP(CLOCKSET2_M_MASK, (n)) +#define CLOCKSET2_VCO_CNTRL_MASK GENMASK_U32(13, 8) +#define CLOCKSET2_VCO_CNTRL(n) FIELD_PREP(CLOCKSET2_VCO_CNTRL_MASK, (n)) +#define CLOCKSET2_N_MASK GENMASK_U32(3, 0) +#define CLOCKSET2_N(n) FIELD_PREP(CLOCKSET2_N_MASK, (n)) #define CLOCKSET3 0x1024 -#define CLOCKSET3_PROP_CNTRL(x) (((x) & 0x3f) << 24) -#define CLOCKSET3_INT_CNTRL(x) (((x) & 0x3f) << 16) -#define CLOCKSET3_CPBIAS_CNTRL(x) (((x) & 0x7f) << 8) -#define CLOCKSET3_GMP_CNTRL(x) (((x) & 0x3) << 0) +#define CLOCKSET3_PROP_CNTRL_MASK GENMASK_U32(29, 24) +#define CLOCKSET3_PROP_CNTRL(n) FIELD_PREP(CLOCKSET3_PROP_CNTRL_MASK, (n)) +#define CLOCKSET3_INT_CNTRL_MASK GENMASK_U32(21, 16) +#define CLOCKSET3_INT_CNTRL(n) FIELD_PREP(CLOCKSET3_INT_CNTRL_MASK, (n)) +#define CLOCKSET3_CPBIAS_CNTRL_MASK GENMASK_U32(14, 8) +#define CLOCKSET3_CPBIAS_CNTRL(n) FIELD_PREP(CLOCKSET3_CPBIAS_CNTRL_MASK, (n)) +#define CLOCKSET3_GMP_CNTRL_MASK GENMASK_U32(1, 0) +#define CLOCKSET3_GMP_CNTRL(n) FIELD_PREP(CLOCKSET3_GMP_CNTRL_MASK, (n)) #define PHTW 0x1034 -#define PHTW_DWEN (1 << 24) -#define PHTW_TESTDIN_DATA(x) (((x) & 0xff) << 16) -#define PHTW_CWEN (1 << 8) -#define PHTW_TESTDIN_CODE(x) (((x) & 0xff) << 0) +#define PHTW_DWEN BIT_U32(24) +#define PHTW_TESTDIN_DATA_MASK GENMASK_U32(23, 16) +#define PHTW_CWEN BIT_U32(8) +#define PHTW_TESTDIN_CODE_MASK GENMASK_U32(7, 0) #define PHTR 0x1038 -#define PHTR_TEST (1 << 16) +#define PHTR_TESTDOUT GENMASK_U32(23, 16) +#define PHTR_TESTDOUT_TEST BIT_U32(16) #define PHTC 0x103c -#define PHTC_TESTCLR (1 << 0) +#define PHTC_TESTCLR BIT_U32(0) #endif /* __RCAR_MIPI_DSI_REGS_H__ */ diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig index e57536fd6f4d..7f2ef7137ae5 100644 --- a/drivers/gpu/drm/renesas/rz-du/Kconfig +++ b/drivers/gpu/drm/renesas/rz-du/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config DRM_RZG2L_DU tristate "DRM Support for RZ/G2L Display Unit" - depends on ARCH_RZG2L || COMPILE_TEST + depends on ARCH_RENESAS || COMPILE_TEST depends on DRM && OF depends on VIDEO_RENESAS_VSP1 select DRM_CLIENT_SELECTION diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c index e1aa6a719529..0fef33a5a089 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "rzg2l_du_drv.h" diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index d30f0983a53a..fdab71d51e2a 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -28,6 +28,7 @@ #include <drm/bridge/analogix_dp.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -330,38 +331,29 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp) struct device_node *np = dev->of_node; dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); - if (IS_ERR(dp->grf)) { - DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n"); - return PTR_ERR(dp->grf); - } + if (IS_ERR(dp->grf)) + return dev_err_probe(dev, PTR_ERR(dp->grf), + "failed to get rockchip,grf property\n"); - dp->grfclk = devm_clk_get(dev, "grf"); - if (PTR_ERR(dp->grfclk) 
== -ENOENT) { - dp->grfclk = NULL; - } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) { - return -EPROBE_DEFER; - } else if (IS_ERR(dp->grfclk)) { - DRM_DEV_ERROR(dev, "failed to get grf clock\n"); - return PTR_ERR(dp->grfclk); - } + dp->grfclk = devm_clk_get_optional(dev, "grf"); + if (IS_ERR(dp->grfclk)) + return dev_err_probe(dev, PTR_ERR(dp->grfclk), + "failed to get grf clock\n"); dp->pclk = devm_clk_get(dev, "pclk"); - if (IS_ERR(dp->pclk)) { - DRM_DEV_ERROR(dev, "failed to get pclk property\n"); - return PTR_ERR(dp->pclk); - } + if (IS_ERR(dp->pclk)) + return dev_err_probe(dev, PTR_ERR(dp->pclk), + "failed to get pclk property\n"); dp->rst = devm_reset_control_get(dev, "dp"); - if (IS_ERR(dp->rst)) { - DRM_DEV_ERROR(dev, "failed to get dp reset control\n"); - return PTR_ERR(dp->rst); - } + if (IS_ERR(dp->rst)) + return dev_err_probe(dev, PTR_ERR(dp->rst), + "failed to get dp reset control\n"); dp->apbrst = devm_reset_control_get_optional(dev, "apb"); - if (IS_ERR(dp->apbrst)) { - DRM_DEV_ERROR(dev, "failed to get apb reset control\n"); - return PTR_ERR(dp->apbrst); - } + if (IS_ERR(dp->apbrst)) + return dev_err_probe(dev, PTR_ERR(dp->apbrst), + "failed to get apb reset control\n"); return 0; } diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index b7e3f5dcf8d5..177e30445ee8 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -21,6 +21,7 @@ #include <drm/drm_bridge_connector.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c index 924fb1d3ece2..0dc3804051a9 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c @@ -11,6 +11,8 @@ #include <linux/iopoll.h> #include <linux/reset.h> +#include <drm/drm_print.h> + #include "cdn-dp-core.h" #include "cdn-dp-reg.h" diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 5523911b990d..2dad6b7b61b2 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -24,6 +24,7 @@ #include <drm/bridge/dw_mipi_dsi.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "rockchip_drm_drv.h" @@ -163,6 +164,11 @@ #define RK3288_DSI0_LCDC_SEL BIT(6) #define RK3288_DSI1_LCDC_SEL BIT(9) +#define RK3368_GRF_SOC_CON7 0x41c +#define RK3368_DSI_FORCETXSTOPMODE (0xf << 7) +#define RK3368_DSI_FORCERXMODE BIT(6) +#define RK3368_DSI_TURNDISABLE BIT(5) + #define RK3399_GRF_SOC_CON20 0x6250 #define RK3399_DSI0_LCDC_SEL BIT(0) #define RK3399_DSI1_LCDC_SEL BIT(4) @@ -1528,6 +1534,18 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = { { /* sentinel */ } }; +static const struct rockchip_dw_dsi_chip_data rk3368_chip_data[] = { + { + .reg = 0xff960000, + .lanecfg1_grf_reg = RK3368_GRF_SOC_CON7, + .lanecfg1 = FIELD_PREP_WM16_CONST((RK3368_DSI_TURNDISABLE | + RK3368_DSI_FORCETXSTOPMODE | + RK3368_DSI_FORCERXMODE), 0), + .max_data_lanes = 4, + }, + { /* sentinel */ } +}; + static int rk3399_dphy_tx1rx1_init(struct phy *phy) { struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); @@ -1688,6 +1706,9 @@ static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = { .compatible = "rockchip,rk3288-mipi-dsi", .data = &rk3288_chip_data, }, { + 
.compatible = "rockchip,rk3368-mipi-dsi", + .data = &rk3368_chip_data, + }, { .compatible = "rockchip,rk3399-mipi-dsi", .data = &rk3399_chip_data, }, { diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c index ed6e8f036f4b..c9fe6aa3e3e3 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> +#include <linux/phy/phy-hdmi.h> #include <linux/regmap.h> #include <linux/workqueue.h> @@ -38,21 +39,16 @@ #define RK3576_HDMI_HDCP14_MEM_EN BIT(15) #define RK3576_VO0_GRF_SOC_CON8 0x0020 -#define RK3576_COLOR_FORMAT_MASK (0xf << 4) -#define RK3576_COLOR_DEPTH_MASK (0xf << 8) -#define RK3576_RGB (0 << 4) -#define RK3576_YUV422 (0x1 << 4) -#define RK3576_YUV444 (0x2 << 4) -#define RK3576_YUV420 (0x3 << 4) -#define RK3576_8BPC (0x0 << 8) -#define RK3576_10BPC (0x6 << 8) +#define RK3576_COLOR_DEPTH_MASK GENMASK(11, 8) +#define RK3576_8BPC 0x0 +#define RK3576_10BPC 0x6 +#define RK3576_COLOR_FORMAT_MASK GENMASK(7, 4) +#define RK3576_RGB 0x9 +#define RK3576_YUV422 0x1 +#define RK3576_YUV444 0x2 +#define RK3576_YUV420 0x3 #define RK3576_CECIN_MASK BIT(3) -#define RK3576_VO0_GRF_SOC_CON12 0x0030 -#define RK3576_GRF_OSDA_DLYN (0xf << 12) -#define RK3576_GRF_OSDA_DIV (0x7f << 1) -#define RK3576_GRF_OSDA_DLY_EN BIT(0) - #define RK3576_VO0_GRF_SOC_CON14 0x0038 #define RK3576_I2S_SEL_MASK BIT(0) #define RK3576_SPDIF_SEL_MASK BIT(1) @@ -74,6 +70,12 @@ #define RK3588_HDMI1_LEVEL_INT BIT(24) #define RK3588_GRF_VO1_CON3 0x000c #define RK3588_GRF_VO1_CON6 0x0018 +#define RK3588_COLOR_DEPTH_MASK GENMASK(7, 4) +#define RK3588_8BPC 0x0 +#define RK3588_10BPC 0x6 +#define RK3588_COLOR_FORMAT_MASK GENMASK(3, 0) +#define RK3588_RGB 0x0 +#define RK3588_YUV420 0x3 #define RK3588_SCLIN_MASK BIT(9) #define RK3588_SDAIN_MASK BIT(10) #define RK3588_MODE_MASK BIT(11) @@ -92,14 +94,16 @@ struct rockchip_hdmi_qp { struct rockchip_encoder encoder; struct dw_hdmi_qp *hdmi; struct phy *phy; - struct gpio_desc *enable_gpio; + struct gpio_desc *frl_enable_gpio; struct delayed_work hpd_work; int port_id; const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops; + unsigned long long tmds_char_rate; }; struct rockchip_hdmi_qp_ctrl_ops { void (*io_init)(struct rockchip_hdmi_qp *hdmi); + void (*enc_init)(struct rockchip_hdmi_qp *hdmi, struct rockchip_crtc_state *state); irqreturn_t (*irq_callback)(int irq, void *dev_id); irqreturn_t (*hardirq_callback)(int irq, void *dev_id); }; @@ -115,23 +119,15 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder) { struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); struct drm_crtc *crtc = encoder->crtc; - unsigned long long rate; /* Unconditionally switch to TMDS as FRL is not yet supported */ - gpiod_set_value(hdmi->enable_gpio, 1); - - if (crtc && crtc->state) { - rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode, - 8, HDMI_COLORSPACE_RGB); - /* - * FIXME: Temporary workaround to pass pixel clock rate - * to the PHY driver until phy_configure_opts_hdmi - * becomes available in the PHY API. 
See also the related - * comment in rk_hdptx_phy_power_on() from - * drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c - */ - phy_set_bus_width(hdmi->phy, div_u64(rate, 100)); - } + gpiod_set_value(hdmi->frl_enable_gpio, 0); + + if (!crtc || !crtc->state) + return; + + if (hdmi->ctrl_ops->enc_init) + hdmi->ctrl_ops->enc_init(hdmi, to_rockchip_crtc_state(crtc->state)); } static int @@ -139,12 +135,29 @@ dw_hdmi_qp_rockchip_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); + union phy_configure_opts phy_cfg = {}; + int ret; + + if (hdmi->tmds_char_rate == conn_state->hdmi.tmds_char_rate && + s->output_bpc == conn_state->hdmi.output_bpc) + return 0; + + phy_cfg.hdmi.tmds_char_rate = conn_state->hdmi.tmds_char_rate; + phy_cfg.hdmi.bpc = conn_state->hdmi.output_bpc; + + ret = phy_configure(hdmi->phy, &phy_cfg); + if (!ret) { + hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; + s->output_type = DRM_MODE_CONNECTOR_HDMIA; + s->output_bpc = conn_state->hdmi.output_bpc; + } else { + dev_err(hdmi->dev, "Failed to configure phy: %d\n", ret); + } - s->output_mode = ROCKCHIP_OUT_MODE_AAAA; - s->output_type = DRM_MODE_CONNECTOR_HDMIA; - - return 0; + return ret; } static const struct @@ -375,15 +388,45 @@ static void dw_hdmi_qp_rk3588_io_init(struct rockchip_hdmi_qp *hdmi) regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); } +static void dw_hdmi_qp_rk3576_enc_init(struct rockchip_hdmi_qp *hdmi, + struct rockchip_crtc_state *state) +{ + u32 val; + + if (state->output_bpc == 10) + val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_10BPC); + else + val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_8BPC); + + regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON8, val); +} + +static void dw_hdmi_qp_rk3588_enc_init(struct rockchip_hdmi_qp *hdmi, + struct rockchip_crtc_state *state) +{ + u32 val; + + if (state->output_bpc == 10) + val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_10BPC); + else + val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_8BPC); + + regmap_write(hdmi->vo_regmap, + hdmi->port_id ? 
RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3, + val); +} + static const struct rockchip_hdmi_qp_ctrl_ops rk3576_hdmi_ctrl_ops = { .io_init = dw_hdmi_qp_rk3576_io_init, - .irq_callback = dw_hdmi_qp_rk3576_irq, + .enc_init = dw_hdmi_qp_rk3576_enc_init, + .irq_callback = dw_hdmi_qp_rk3576_irq, .hardirq_callback = dw_hdmi_qp_rk3576_hardirq, }; static const struct rockchip_hdmi_qp_ctrl_ops rk3588_hdmi_ctrl_ops = { .io_init = dw_hdmi_qp_rk3588_io_init, - .irq_callback = dw_hdmi_qp_rk3588_irq, + .enc_init = dw_hdmi_qp_rk3588_enc_init, + .irq_callback = dw_hdmi_qp_rk3588_irq, .hardirq_callback = dw_hdmi_qp_rk3588_hardirq, }; @@ -429,14 +472,15 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); + struct dw_hdmi_qp_plat_data plat_data = {}; const struct rockchip_hdmi_qp_cfg *cfg; - struct dw_hdmi_qp_plat_data plat_data; struct drm_device *drm = data; struct drm_connector *connector; struct drm_encoder *encoder; struct rockchip_hdmi_qp *hdmi; struct resource *res; struct clk_bulk_data *clks; + struct clk *ref_clk; int ret, irq, i; if (!pdev->dev.of_node) @@ -455,10 +499,8 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, return -ENODEV; if (!cfg->ctrl_ops || !cfg->ctrl_ops->io_init || - !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback) { - dev_err(dev, "Missing platform ctrl ops\n"); - return -ENODEV; - } + !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback) + return dev_err_probe(dev, -ENODEV, "Missing platform ctrl ops\n"); hdmi->ctrl_ops = cfg->ctrl_ops; hdmi->dev = &pdev->dev; @@ -471,13 +513,13 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, break; } } - if (hdmi->port_id < 0) { - dev_err(hdmi->dev, "Failed to match HDMI port ID\n"); - return hdmi->port_id; - } + if (hdmi->port_id < 0) + return dev_err_probe(hdmi->dev, hdmi->port_id, + "Failed to match HDMI port ID\n"); plat_data.phy_ops = cfg->phy_ops; plat_data.phy_data = hdmi; + plat_data.max_bpc = 10; encoder = &hdmi->encoder.encoder; encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); @@ -495,39 +537,38 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - if (IS_ERR(hdmi->regmap)) { - dev_err(hdmi->dev, "Unable to get rockchip,grf\n"); - return PTR_ERR(hdmi->regmap); - } + if (IS_ERR(hdmi->regmap)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->regmap), + "Unable to get rockchip,grf\n"); hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vo-grf"); - if (IS_ERR(hdmi->vo_regmap)) { - dev_err(hdmi->dev, "Unable to get rockchip,vo-grf\n"); - return PTR_ERR(hdmi->vo_regmap); - } + if (IS_ERR(hdmi->vo_regmap)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->vo_regmap), + "Unable to get rockchip,vo-grf\n"); ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks); - if (ret < 0) { - dev_err(hdmi->dev, "Failed to get clocks: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(hdmi->dev, ret, "Failed to get clocks\n"); - hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable", - GPIOD_OUT_HIGH); - if (IS_ERR(hdmi->enable_gpio)) { - ret = PTR_ERR(hdmi->enable_gpio); - dev_err(hdmi->dev, "Failed to request enable GPIO: %d\n", ret); - return ret; - } + ref_clk = clk_get(hdmi->dev, "ref"); + if (IS_ERR(ref_clk)) + return dev_err_probe(hdmi->dev, PTR_ERR(ref_clk), + "Failed to get ref 
clock\n"); + + plat_data.ref_clk_rate = clk_get_rate(ref_clk); + clk_put(ref_clk); + + hdmi->frl_enable_gpio = devm_gpiod_get_optional(hdmi->dev, "frl-enable", + GPIOD_OUT_LOW); + if (IS_ERR(hdmi->frl_enable_gpio)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->frl_enable_gpio), + "Failed to request FRL enable GPIO\n"); hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0); - if (IS_ERR(hdmi->phy)) { - ret = PTR_ERR(hdmi->phy); - if (ret != -EPROBE_DEFER) - dev_err(hdmi->dev, "failed to get phy: %d\n", ret); - return ret; - } + if (IS_ERR(hdmi->phy)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->phy), + "Failed to get phy\n"); cfg->ctrl_ops->io_init(hdmi); @@ -537,6 +578,10 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, if (plat_data.main_irq < 0) return plat_data.main_irq; + plat_data.cec_irq = platform_get_irq_byname(pdev, "cec"); + if (plat_data.cec_irq < 0) + return plat_data.cec_irq; + irq = platform_get_irq_byname(pdev, "hpd"); if (irq < 0) return irq; @@ -556,17 +601,15 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, hdmi->hdmi = dw_hdmi_qp_bind(pdev, encoder, &plat_data); if (IS_ERR(hdmi->hdmi)) { - ret = PTR_ERR(hdmi->hdmi); drm_encoder_cleanup(encoder); - return ret; + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->hdmi), + "Failed to bind dw-hdmi-qp"); } connector = drm_bridge_connector_init(drm, encoder); - if (IS_ERR(connector)) { - ret = PTR_ERR(connector); - dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret); - return ret; - } + if (IS_ERR(connector)) + return dev_err_probe(hdmi->dev, PTR_ERR(connector), + "Failed to init bridge connector\n"); return drm_connector_attach_encoder(connector, encoder); } diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index f24827dc1421..9f7a8cf0ab44 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -22,6 +22,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index ae4a5ac2299a..997429115068 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -10,6 +10,7 @@ #include <drm/display/drm_hdmi_state_helper.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index eb77bde9f628..3099408e9d05 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -22,6 +22,7 @@ #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -96,6 +97,9 @@ void rockchip_drm_dma_init_device(struct drm_device *drm_dev, private->iommu_dev = ERR_PTR(-ENODEV); else if (!private->iommu_dev) private->iommu_dev = dev; + + if (!IS_ERR(private->iommu_dev)) + drm_dev_set_dma_dev(drm_dev, private->iommu_dev); } static int rockchip_drm_init_iommu(struct drm_device *drm_dev) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 6330b883efc3..df9a8bff2e22 100644 --- 
a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -9,10 +9,12 @@ #include <linux/vmalloc.h> #include <drm/drm.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_vma_manager.h> #include "rockchip_drm_drv.h" @@ -403,13 +405,12 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct rockchip_gem_object *rk_obj; - int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + int ret; - /* - * align to 64 bytes since Mali requires it. - */ - args->pitch = ALIGN(min_pitch, 64); - args->size = args->pitch * args->height; + /* 64-byte alignment required by Mali */ + ret = drm_mode_size_dumb(dev, args, SZ_64, 0); + if (ret) + return ret; rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, &args->handle); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index ba6b0528d1e5..ad4ab894391a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -27,6 +27,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_self_refresh_helper.h> #include <drm/drm_vblank.h> @@ -826,8 +827,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -1092,7 +1092,8 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane, if (!plane->state->fb) return -EINVAL; - crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); /* Special case for asynchronous cursor updates. */ if (!crtc_state) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 7ec7bea5e38e..498df0ce4680 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -29,6 +29,7 @@ #include <drm/drm_flip_work.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -101,7 +102,7 @@ enum vop2_afbc_format { VOP2_AFBC_FMT_INVALID = -1, }; -#define VOP2_MAX_DCLK_RATE 600000000 +#define VOP2_MAX_DCLK_RATE 600000000UL /* * bus-format types. 
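Both dumb-buffer hunks above (rcar_du_dumb_create() and rockchip_gem_dumb_create()) replace hand-rolled pitch and size math with drm_mode_size_dumb(dev, args, pitch_align, size_align). For orientation, this is roughly the arithmetic the removed driver code performed; it is an editorial sketch of the old open-coded path, not the helper's actual implementation, and demo_size_dumb() is a hypothetical name:

#include <linux/math.h>
#include <linux/types.h>
#include <uapi/drm/drm_mode.h>

static void demo_size_dumb(struct drm_mode_create_dumb *args,
			   unsigned long pitch_align)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/* e.g. 64-byte alignment for Mali, or the R8A7779's 16-pixel rule */
	args->pitch = roundup(min_pitch, pitch_align);
	args->size = (u64)args->pitch * args->height;
}

One likely motivation for the shared helper is centralising the overflow checking that per-driver width * bpp * height multiplications tended to omit.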
@@ -1003,6 +1004,8 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, struct drm_rect *src = &pstate->src; int min_scale = FRAC_16_16(1, 8); int max_scale = FRAC_16_16(8, 1); + int src_x, src_w, src_h; + int dest_w, dest_h; int format; int ret; @@ -1013,7 +1016,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, vop2 = vp->vop2; vop2_data = vop2->data; - cstate = drm_atomic_get_existing_crtc_state(pstate->state, crtc); + cstate = drm_atomic_get_new_crtc_state(pstate->state, crtc); if (WARN_ON(!cstate)) return -EINVAL; @@ -1030,22 +1033,25 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, if (format < 0) return format; - if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 || - drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) { - drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n", - drm_rect_width(src) >> 16, drm_rect_height(src) >> 16, - drm_rect_width(dest), drm_rect_height(dest)); - pstate->visible = false; - return 0; + /* Co-ordinates have now been clipped */ + src_x = src->x1 >> 16; + src_w = drm_rect_width(src) >> 16; + src_h = drm_rect_height(src) >> 16; + dest_w = drm_rect_width(dest); + dest_h = drm_rect_height(dest); + + if (src_w < 4 || src_h < 4 || dest_w < 4 || dest_h < 4) { + drm_dbg_kms(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n", + src_w, src_h, dest_w, dest_h); + return -EINVAL; } - if (drm_rect_width(src) >> 16 > vop2_data->max_input.width || - drm_rect_height(src) >> 16 > vop2_data->max_input.height) { - drm_err(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n", - drm_rect_width(src) >> 16, - drm_rect_height(src) >> 16, - vop2_data->max_input.width, - vop2_data->max_input.height); + if (src_w > vop2_data->max_input.width || + src_h > vop2_data->max_input.height) { + drm_dbg_kms(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n", + src_w, src_h, + vop2_data->max_input.width, + vop2_data->max_input.height); return -EINVAL; } @@ -1053,8 +1059,8 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, * Src.x1 can be odd when do clip, but yuv plane start point * need align with 2 pixel. 
*/ - if (fb->format->is_yuv && ((pstate->src.x1 >> 16) % 2)) { - drm_err(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n"); + if (fb->format->is_yuv && src_x % 2) { + drm_dbg_kms(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; } @@ -1140,7 +1146,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, struct vop2 *vop2 = win->vop2; struct drm_framebuffer *fb = pstate->fb; u32 bpp = vop2_get_bpp(fb->format); - u32 actual_w, actual_h, dsp_w, dsp_h; + u32 src_w, src_h, dsp_w, dsp_h; u32 act_info, dsp_info; u32 format; u32 afbc_format; @@ -1204,8 +1210,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, uv_mst = rk_obj->dma_addr + offset + fb->offsets[1]; } - actual_w = drm_rect_width(src) >> 16; - actual_h = drm_rect_height(src) >> 16; + src_w = drm_rect_width(src) >> 16; + src_h = drm_rect_height(src) >> 16; dsp_w = drm_rect_width(dest); if (dest->x1 + dsp_w > adjusted_mode->hdisplay) { @@ -1215,7 +1221,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, dsp_w = adjusted_mode->hdisplay - dest->x1; if (dsp_w < 4) dsp_w = 4; - actual_w = dsp_w * actual_w / drm_rect_width(dest); + src_w = dsp_w * src_w / drm_rect_width(dest); } dsp_h = drm_rect_height(dest); @@ -1227,35 +1233,35 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, dsp_h = adjusted_mode->vdisplay - dest->y1; if (dsp_h < 4) dsp_h = 4; - actual_h = dsp_h * actual_h / drm_rect_height(dest); + src_h = dsp_h * src_h / drm_rect_height(dest); } /* * This is workaround solution for IC design: - * esmart can't support scale down when actual_w % 16 == 1. + * esmart can't support scale down when src_w % 16 == 1. */ if (!(win->data->feature & WIN_FEATURE_AFBDC)) { - if (actual_w > dsp_w && (actual_w & 0xf) == 1) { + if (src_w > dsp_w && (src_w & 0xf) == 1) { drm_dbg_kms(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n", - vp->id, win->data->name, actual_w); - actual_w -= 1; + vp->id, win->data->name, src_w); + src_w -= 1; } } - if (afbc_en && actual_w % 4) { - drm_dbg_kms(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n", - vp->id, win->data->name, actual_w); - actual_w = ALIGN_DOWN(actual_w, 4); + if (afbc_en && src_w % 4) { + drm_dbg_kms(vop2->drm, "vp%d %s src_w[%d] not 4 pixel aligned\n", + vp->id, win->data->name, src_w); + src_w = ALIGN_DOWN(src_w, 4); } - act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff); + act_info = (src_h - 1) << 16 | ((src_w - 1) & 0xffff); dsp_info = (dsp_h - 1) << 16 | ((dsp_w - 1) & 0xffff); format = vop2_convert_format(fb->format->format); half_block_en = vop2_half_block_enable(pstate); drm_dbg(vop2->drm, "vp%d update %s[%dx%d->%dx%d@%dx%d] fmt[%p4cc_%s] addr[%pad]\n", - vp->id, win->data->name, actual_w, actual_h, dsp_w, dsp_h, + vp->id, win->data->name, src_w, src_h, dsp_w, dsp_h, dest->x1, dest->y1, &fb->format->format, afbc_en ? 
"AFBC" : "", &yrgb_mst); @@ -1284,7 +1290,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, if (fb->modifier & AFBC_FORMAT_MOD_YTR) afbc_format |= (1 << 4); - afbc_tile_num = ALIGN(actual_w, block_w) / block_w; + afbc_tile_num = ALIGN(src_w, block_w) / block_w; /* * AFBC pic_vir_width is count by pixel, this is different @@ -1362,8 +1368,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, if (rotate_90 || rotate_270) { act_info = swahw32(act_info); - actual_w = drm_rect_height(src) >> 16; - actual_h = drm_rect_width(src) >> 16; + src_w = drm_rect_height(src) >> 16; + src_h = drm_rect_width(src) >> 16; } vop2_win_write(win, VOP2_WIN_FORMAT, format); @@ -1379,7 +1385,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, vop2_win_write(win, VOP2_WIN_UV_MST, uv_mst); } - vop2_setup_scale(vop2, win, actual_w, actual_h, dsp_w, dsp_h, fb->format->format); + vop2_setup_scale(vop2, win, src_w, src_h, dsp_w, dsp_h, fb->format->format); if (!vop2_cluster_window(win)) vop2_plane_setup_color_key(plane, 0); vop2_win_write(win, VOP2_WIN_ACT_INFO, act_info); @@ -1737,36 +1743,42 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc, * Switch to HDMI PHY PLL as DCLK source for display modes up * to 4K@60Hz, if available, otherwise keep using the system CRU. */ - if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) { - drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { - struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); - - if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { - if (!vop2->pll_hdmiphy0) + if (vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) { + unsigned long max_dclk = DIV_ROUND_CLOSEST_ULL(VOP2_MAX_DCLK_RATE * 8, + vcstate->output_bpc); + if (clock <= max_dclk) { + drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { + struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); + + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { + if (!vop2->pll_hdmiphy0) + break; + + if (!vp->dclk_src) + vp->dclk_src = clk_get_parent(vp->dclk); + + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0); + if (ret < 0) + drm_warn(vop2->drm, + "Could not switch to HDMI0 PHY PLL: %d\n", + ret); break; + } - if (!vp->dclk_src) - vp->dclk_src = clk_get_parent(vp->dclk); + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) { + if (!vop2->pll_hdmiphy1) + break; - ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0); - if (ret < 0) - drm_warn(vop2->drm, - "Could not switch to HDMI0 PHY PLL: %d\n", ret); - break; - } + if (!vp->dclk_src) + vp->dclk_src = clk_get_parent(vp->dclk); - if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) { - if (!vop2->pll_hdmiphy1) + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1); + if (ret < 0) + drm_warn(vop2->drm, + "Could not switch to HDMI1 PHY PLL: %d\n", + ret); break; - - if (!vp->dclk_src) - vp->dclk_src = clk_get_parent(vp->dclk); - - ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1); - if (ret < 0) - drm_warn(vop2->drm, - "Could not switch to HDMI1 PHY PLL: %d\n", ret); - break; + } } } } @@ -2647,6 +2659,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) if (IS_ERR(vop2->map)) return PTR_ERR(vop2->map); + /* Set the bounds for framebuffer creation */ + drm->mode_config.min_width = 4; + drm->mode_config.min_height = 4; + drm->mode_config.max_width = vop2_data->max_input.width; + drm->mode_config.max_height = vop2_data->max_input.height; + ret = vop2_win_init(vop2); if (ret) 
return ret; diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 2411260db51d..75f898a10cbc 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -22,6 +22,7 @@ #include <drm/drm_bridge_connector.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 811020665120..5c0c6e2cc28d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -15,6 +15,7 @@ #include <drm/drm_bridge_connector.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 38c49030c7ab..cd8380f0eddc 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -1369,6 +1369,25 @@ static const struct vop2_regs_dump rk3588_regs_dump[] = { }, }; +/* + * phys_id is used to identify a main window(Cluster Win/Smart Win, not + * include the sub win of a cluster or the multi area) that can do overlay + * in main overlay stage. + */ +static struct vop2_win *vop2_find_win_by_phys_id(struct vop2 *vop2, uint8_t phys_id) +{ + struct vop2_win *win; + int i; + + for (i = 0; i < vop2->data->win_size; i++) { + win = &vop2->win[i]; + if (win->data->phys_id == phys_id) + return win; + } + + return NULL; +} + static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) { struct vop2 *vop2 = vp->vop2; @@ -1842,15 +1861,31 @@ static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config, alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; } -static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id) +static int vop2_find_start_mixer_id_for_vp(struct vop2_video_port *vp) { - struct vop2_video_port *vp; - int used_layer = 0; + struct vop2 *vop2 = vp->vop2; + struct vop2_win *win; + u32 layer_sel = vop2->old_layer_sel; + u32 used_layer = 0; + unsigned long win_mask = vp->win_mask; + unsigned long phys_id; + bool match; int i; - for (i = 0; i < port_id; i++) { - vp = &vop2->vps[i]; - used_layer += hweight32(vp->win_mask); + for (i = 0; i < 31; i += 4) { + match = false; + for_each_set_bit(phys_id, &win_mask, ROCKCHIP_VOP2_ESMART3) { + win = vop2_find_win_by_phys_id(vop2, phys_id); + if (win->data->layer_sel_id[vp->id] == ((layer_sel >> i) & 0xf)) { + match = true; + break; + } + } + + if (!match) + used_layer += 1; + else + break; } return used_layer; @@ -1935,7 +1970,7 @@ static void vop2_setup_alpha(struct vop2_video_port *vp) u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE; if (vop2->version <= VOP_VERSION_RK3588) - mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); + mixer_id = vop2_find_start_mixer_id_for_vp(vp); else mixer_id = 0; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index d1f788763318..219f8c2fa88e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -880,6 +880,7 @@ static const struct vop_data rk3368_vop = { .win = rk3368_vop_win_data, .win_size = ARRAY_SIZE(rk3368_vop_win_data), .max_output = { 4096, 2160 }, + .lut_size = 1024, }; static const struct vop_intr 
rk3366_vop_intr = { diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index c8e949f4a568..fe174a4857be 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -173,26 +173,15 @@ int drm_sched_entity_error(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_error); +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + struct dma_fence_cb *cb); + static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) { struct drm_sched_job *job = container_of(wrk, typeof(*job), work); - - drm_sched_fence_scheduled(job->s_fence, NULL); - drm_sched_fence_finished(job->s_fence, -ESRCH); - WARN_ON(job->s_fence->parent); - job->sched->ops->free_job(job); -} - -/* Signal the scheduler finished fence when the entity in question is killed. */ -static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_job *job = container_of(cb, struct drm_sched_job, - finish_cb); + struct dma_fence *f; unsigned long index; - dma_fence_put(f); - /* Wait for all dependencies to avoid data corruptions */ xa_for_each(&job->dependencies, index, f) { struct drm_sched_fence *s_fence = to_drm_sched_fence(f); @@ -220,6 +209,21 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, dma_fence_put(f); } + drm_sched_fence_scheduled(job->s_fence, NULL); + drm_sched_fence_finished(job->s_fence, -ESRCH); + WARN_ON(job->s_fence->parent); + job->sched->ops->free_job(job); +} + +/* Signal the scheduler finished fence when the entity in question is killed. */ +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, + finish_cb); + + dma_fence_put(f); + INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); schedule_work(&job->work); } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index c39f0245e3a9..1d4f1b822e7b 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1237,8 +1237,13 @@ static void drm_sched_run_job_work(struct work_struct *w) /* Find entity with a ready job */ entity = drm_sched_select_entity(sched); - if (!entity) - return; /* No more work */ + if (!entity) { + /* + * Either no more work to do, or the next ready job needs more + * credits than the scheduler has currently available. + */ + return; + } sched_job = drm_sched_entity_pop_job(entity); if (!sched_job) { @@ -1315,7 +1320,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_ sched->name = args->name; sched->timeout = args->timeout; sched->hang_limit = args->hang_limit; - sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq; + sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_percpu_wq; sched->score = args->score ? args->score : &sched->_score; sched->dev = args->dev; @@ -1420,7 +1425,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); - list_for_each_entry(s_entity, &rq->entities, list) + list_for_each_entry(s_entity, &rq->entities, list) { /* * Prevents reinsertion and marks job_queue as idle, * it will be removed from the rq in drm_sched_entity_fini() @@ -1441,8 +1446,15 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) * For now, this remains a potential race in all * drivers that keep entities alive for longer than * the scheduler. 
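The sched_entity hunk above inverts the split between the dma_fence callback and the work item: the dependency walk now runs from drm_sched_entity_kill_jobs_work(), and the callback only queues that work. Fence callbacks can fire from the signalling path, so keeping them down to a reference drop plus schedule_work() is the usual shape; a self-contained sketch of that pattern with hypothetical demo_* names:

#include <linux/container_of.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_job {
	struct work_struct work;
	struct dma_fence_cb finish_cb;
};

/* Process context: safe place for the dependency walk, signalling, freeing. */
static void demo_kill_jobs_work(struct work_struct *w)
{
	struct demo_job *job = container_of(w, struct demo_job, work);

	/* ... walk job dependencies, signal the scheduler fences ... */
	kfree(job);
}

/* Fence callback: may run from the signalling path, so stay minimal. */
static void demo_kill_jobs_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct demo_job *job = container_of(cb, struct demo_job, finish_cb);

	dma_fence_put(f);
	INIT_WORK(&job->work, demo_kill_jobs_work);
	schedule_work(&job->work);
}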
+ * + * The READ_ONCE() is there to make the lockless read + * (warning about the lockless write below) slightly + * less broken... */ + if (!READ_ONCE(s_entity->stopped)) + dev_warn(sched->dev, "Tearing down scheduler with active entities!\n"); s_entity->stopped = true; + } spin_unlock(&rq->lock); kfree(sched->sched_rq[i]); } diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h index 7f31d35780cc..553d45abd057 100644 --- a/drivers/gpu/drm/scheduler/tests/sched_tests.h +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h @@ -31,9 +31,8 @@ * * @base: DRM scheduler base class * @test: Backpointer to owning the kunit test case - * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list + * @lock: Lock to protect the simulated @hw_timeline and @job_list * @job_list: List of jobs submitted to the mock GPU - * @done_list: List of jobs completed by the mock GPU * @hw_timeline: Simulated hardware timeline has a @context, @next_seqno and * @cur_seqno for implementing a struct dma_fence signaling the * simulated job completion. diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c index a6c4a6738ded..4e73c8b415d6 100644 --- a/drivers/gpu/drm/sitronix/st7571-i2c.c +++ b/drivers/gpu/drm/sitronix/st7571-i2c.c @@ -263,6 +263,7 @@ static int st7571_fb_clear_screen(struct st7571_device *st7571) u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp; char pixelvalue = 0x00; + st7571_set_position(st7571, 0, 0); for (int i = 0; i < npixels; i++) regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1); @@ -321,7 +322,7 @@ static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571, size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 4; memcpy(st7571->hwbuf, vmap->vaddr, size); break; - }; + } } static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct drm_rect *rect) diff --git a/drivers/gpu/drm/sitronix/st7586.c b/drivers/gpu/drm/sitronix/st7586.c index a29672d84ede..b57ebf37a664 100644 --- a/drivers/gpu/drm/sitronix/st7586.c +++ b/drivers/gpu/drm/sitronix/st7586.c @@ -25,6 +25,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> +#include <drm/drm_print.h> #include <drm/drm_rect.h> /* controller-specific commands */ diff --git a/drivers/gpu/drm/sitronix/st7735r.c b/drivers/gpu/drm/sitronix/st7735r.c index 1d60f6e5b3bc..c1f8228495f6 100644 --- a/drivers/gpu/drm/sitronix/st7735r.c +++ b/drivers/gpu/drm/sitronix/st7735r.c @@ -24,6 +24,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> +#include <drm/drm_print.h> #define ST7735R_FRMCTR1 0xb1 #define ST7735R_FRMCTR2 0xb2 diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index eec43d1a5595..96cf39320137 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -33,6 +33,7 @@ #include <drm/drm_managed.h> #include <drm/drm_modes.h> #include <drm/drm_rect.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "ssd130x.h" @@ -1016,15 +1017,9 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8); - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); - if (ret) - return ret; - iosys_map_set_vaddr(&dst, buf); drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); - drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); - 
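The ssd130x changes in this file drop the per-rectangle drm_gem_fb_begin_cpu_access()/drm_gem_fb_end_cpu_access() calls from the blit helpers and instead bracket the whole damage walk once in each plane's atomic_update (see the hunks that follow). A condensed sketch of that shape, with the driver-specific blit elided:

#include <linux/dma-direction.h>

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;

	/* One CPU-access section around all damaged rectangles. */
	if (drm_gem_fb_begin_cpu_access(new_state->fb, DMA_FROM_DEVICE))
		return;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &damage) {
		/* format-convert and flush this rectangle to the panel */
	}

	drm_gem_fb_end_cpu_access(new_state->fb, DMA_FROM_DEVICE);
}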
ssd130x_update_rect(ssd130x, rect, buf, data_array); return ret; @@ -1048,15 +1043,9 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb, dst_pitch = drm_rect_width(rect); - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); - if (ret) - return ret; - iosys_map_set_vaddr(&dst, buf); drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); - drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); - ssd132x_update_rect(ssd130x, rect, buf, data_array); return ret; @@ -1078,15 +1067,9 @@ static int ssd133x_fb_blit_rect(struct drm_framebuffer *fb, dst_pitch = drm_format_info_min_pitch(fi, 0, drm_rect_width(rect)); - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); - if (ret) - return ret; - iosys_map_set_vaddr(&dst, data_array); drm_fb_xrgb8888_to_rgb332(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); - drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); - ssd133x_update_rect(ssd130x, rect, data_array, dst_pitch); return ret; @@ -1232,6 +1215,9 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane, if (!drm_dev_enter(drm, &idx)) return; + if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE)) + goto out_drm_dev_exit; + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); drm_atomic_for_each_plane_damage(&iter, &damage) { dst_clip = plane_state->dst; @@ -1245,6 +1231,9 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane, &shadow_plane_state->fmtcnv_state); } + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); + +out_drm_dev_exit: drm_dev_exit(idx); } @@ -1267,6 +1256,9 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane, if (!drm_dev_enter(drm, &idx)) return; + if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE)) + goto out_drm_dev_exit; + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); drm_atomic_for_each_plane_damage(&iter, &damage) { dst_clip = plane_state->dst; @@ -1280,6 +1272,9 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane, &shadow_plane_state->fmtcnv_state); } + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); + +out_drm_dev_exit: drm_dev_exit(idx); } @@ -1301,6 +1296,9 @@ static void ssd133x_primary_plane_atomic_update(struct drm_plane *plane, if (!drm_dev_enter(drm, &idx)) return; + if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE)) + goto out_drm_dev_exit; + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); drm_atomic_for_each_plane_damage(&iter, &damage) { dst_clip = plane_state->dst; @@ -1313,6 +1311,9 @@ static void ssd133x_primary_plane_atomic_update(struct drm_plane *plane, &shadow_plane_state->fmtcnv_state); } + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); + +out_drm_dev_exit: drm_dev_exit(idx); } @@ -1393,7 +1394,7 @@ static void ssd130x_primary_plane_reset(struct drm_plane *plane) { struct ssd130x_plane_state *ssd130x_state; - WARN_ON(plane->state); + drm_WARN_ON_ONCE(plane->dev, plane->state); ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL); if (!ssd130x_state) @@ -1408,7 +1409,7 @@ static struct drm_plane_state *ssd130x_primary_plane_duplicate_state(struct drm_ struct ssd130x_plane_state *old_ssd130x_state; struct ssd130x_plane_state *ssd130x_state; - if (WARN_ON(!plane->state)) + if (drm_WARN_ON_ONCE(plane->dev, !plane->state)) return NULL; old_ssd130x_state = to_ssd130x_plane_state(plane->state); @@ -1473,15 +1474,7 @@ static enum drm_mode_status ssd130x_crtc_mode_valid(struct drm_crtc *crtc, { struct ssd130x_device *ssd130x = drm_to_ssd130x(crtc->dev); - if 
(mode->hdisplay != ssd130x->mode.hdisplay && - mode->vdisplay != ssd130x->mode.vdisplay) - return MODE_ONE_SIZE; - else if (mode->hdisplay != ssd130x->mode.hdisplay) - return MODE_ONE_WIDTH; - else if (mode->vdisplay != ssd130x->mode.vdisplay) - return MODE_ONE_HEIGHT; - - return MODE_OK; + return drm_crtc_helper_mode_valid_fixed(crtc, mode, &ssd130x->mode); } static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc, @@ -1498,7 +1491,7 @@ static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc, if (ret) return ret; - ssd130x_state->data_array = kmalloc(ssd130x->width * pages, GFP_KERNEL); + ssd130x_state->data_array = kmalloc_array(ssd130x->width, pages, GFP_KERNEL); if (!ssd130x_state->data_array) return -ENOMEM; @@ -1519,7 +1512,7 @@ static int ssd132x_crtc_atomic_check(struct drm_crtc *crtc, if (ret) return ret; - ssd130x_state->data_array = kmalloc(columns * ssd130x->height, GFP_KERNEL); + ssd130x_state->data_array = kmalloc_array(columns, ssd130x->height, GFP_KERNEL); if (!ssd130x_state->data_array) return -ENOMEM; @@ -1546,7 +1539,7 @@ static int ssd133x_crtc_atomic_check(struct drm_crtc *crtc, pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width); - ssd130x_state->data_array = kmalloc(pitch * ssd130x->height, GFP_KERNEL); + ssd130x_state->data_array = kmalloc_array(pitch, ssd130x->height, GFP_KERNEL); if (!ssd130x_state->data_array) return -ENOMEM; @@ -1558,7 +1551,7 @@ static void ssd130x_crtc_reset(struct drm_crtc *crtc) { struct ssd130x_crtc_state *ssd130x_state; - WARN_ON(crtc->state); + drm_WARN_ON_ONCE(crtc->dev, crtc->state); ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL); if (!ssd130x_state) @@ -1572,7 +1565,7 @@ static struct drm_crtc_state *ssd130x_crtc_duplicate_state(struct drm_crtc *crtc struct ssd130x_crtc_state *old_ssd130x_state; struct ssd130x_crtc_state *ssd130x_state; - if (WARN_ON(!crtc->state)) + if (drm_WARN_ON_ONCE(crtc->dev, !crtc->state)) return NULL; old_ssd130x_state = to_ssd130x_crtc_state(crtc->state); @@ -1740,20 +1733,8 @@ static const struct drm_encoder_funcs ssd130x_encoder_funcs = { static int ssd130x_connector_get_modes(struct drm_connector *connector) { struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev); - struct drm_display_mode *mode; - struct device *dev = ssd130x->dev; - - mode = drm_mode_duplicate(connector->dev, &ssd130x->mode); - if (!mode) { - dev_err(dev, "Failed to duplicated mode\n"); - return 0; - } - - drm_mode_probed_add(connector, mode); - drm_set_preferred_mode(connector, mode->hdisplay, mode->vdisplay); - /* There is only a single mode */ - return 1; + return drm_connector_helper_get_modes_fixed(connector, &ssd130x->mode); } static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = { @@ -1887,10 +1868,14 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x) mode->type = DRM_MODE_TYPE_DRIVER; mode->clock = 1; - mode->hdisplay = mode->htotal = ssd130x->width; - mode->hsync_start = mode->hsync_end = ssd130x->width; - mode->vdisplay = mode->vtotal = ssd130x->height; - mode->vsync_start = mode->vsync_end = ssd130x->height; + mode->hdisplay = ssd130x->width; + mode->htotal = ssd130x->width; + mode->hsync_start = ssd130x->width; + mode->hsync_end = ssd130x->width; + mode->vdisplay = ssd130x->height; + mode->vtotal = ssd130x->height; + mode->vsync_start = ssd130x->height; + mode->vsync_end = ssd130x->height; mode->width_mm = 27; mode->height_mm = 27; diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index c59fcb4dca32..4e12a465be7f 100644 
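The ssd130x atomic_check hunks above also swap open-coded kmalloc(a * b, GFP_KERNEL) for kmalloc_array(a, b, GFP_KERNEL), which fails the allocation instead of silently wrapping when the product overflows. The same pattern in isolation, with hypothetical width/pages operands:

#include <linux/slab.h>

static void *alloc_shadow_buffer(size_t width, size_t pages)
{
	/* Overflow-checked equivalent of kmalloc(width * pages, GFP_KERNEL). */
	return kmalloc_array(width, pages, GFP_KERNEL);
}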
--- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -14,6 +14,7 @@ #include <drm/drm_fb_dma_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include "sti_compositor.h" #include "sti_cursor.h" diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 5e9332df21df..f16345f01065 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -22,6 +22,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "sti_drv.h" @@ -231,23 +232,15 @@ static const struct component_master_ops sti_ops = { static int sti_platform_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *node = dev->of_node; - struct device_node *child_np; - struct component_match *match = NULL; + int ret; - dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + if (ret) + return ret; devm_of_platform_populate(dev); - child_np = of_get_next_available_child(node, NULL); - - while (child_np) { - drm_of_component_match_add(dev, &match, component_compare_of, - child_np); - child_np = of_get_next_available_child(node, child_np); - } - - return component_master_add_with_match(dev, &sti_ops, match); + return drm_of_component_probe(dev, component_compare_of, &sti_ops); } static void sti_platform_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index f046f5f7ad25..1e5aa8c30645 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -16,6 +16,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include "sti_compositor.h" #include "sti_gdp.h" diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 2c015f563de9..b7397827889c 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -779,6 +779,8 @@ static int sti_hda_probe(struct platform_device *pdev) return PTR_ERR(hda->clk_hddac); } + drm_bridge_add(&hda->bridge); + platform_set_drvdata(pdev, hda); return component_add(&pdev->dev, &sti_hda_ops); @@ -786,7 +788,10 @@ static int sti_hda_probe(struct platform_device *pdev) static void sti_hda_remove(struct platform_device *pdev) { + struct sti_hda *hda = platform_get_drvdata(pdev); + component_del(&pdev->dev, &sti_hda_ops); + drm_bridge_remove(&hda->bridge); } static const struct of_device_id hda_of_match[] = { diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 4e7c3d78b2b9..f8222e60b1e0 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1459,6 +1459,7 @@ static int sti_hdmi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hdmi); + drm_bridge_add(&hdmi->bridge); return component_add(&pdev->dev, &sti_hdmi_ops); release_adapter: @@ -1475,6 +1476,7 @@ static void sti_hdmi_remove(struct platform_device *pdev) if (hdmi->audio_pdev) platform_device_unregister(hdmi->audio_pdev); component_del(&pdev->dev, &sti_hdmi_ops); + drm_bridge_remove(&hdmi->bridge); } struct platform_driver sti_hdmi_driver = { diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index b76606e9a82d..57ef4ba3554e 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -20,6 +20,7 @@ #include 
<drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include "sti_compositor.h" #include "sti_drv.h" diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c index 29e669ccec5b..948f947b5cad 100644 --- a/drivers/gpu/drm/sti/sti_plane.c +++ b/drivers/gpu/drm/sti/sti_plane.c @@ -12,6 +12,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include "sti_compositor.h" #include "sti_drv.h" diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index ee81691b3203..ce6bc7e7b135 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -143,12 +143,17 @@ struct sti_vtg { struct sti_vtg *of_vtg_find(struct device_node *np) { struct platform_device *pdev; + struct sti_vtg *vtg; pdev = of_find_device_by_node(np); if (!pdev) return NULL; - return (struct sti_vtg *)platform_get_drvdata(pdev); + vtg = platform_get_drvdata(pdev); + + put_device(&pdev->dev); + + return vtg; } static void vtg_reset(struct sti_vtg *vtg) diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index ab00d1a6140c..56d53ac3082d 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -25,6 +25,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c index 2c7bc064bc66..58eae6804cc8 100644 --- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c +++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c @@ -274,8 +274,8 @@ static unsigned long dw_mipi_dsi_clk_recalc_rate(struct clk_hw *hw, return (unsigned long)pll_out_khz * 1000; } -static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int dw_mipi_dsi_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct dw_mipi_dsi_stm *dsi = clk_to_dw_mipi_dsi_stm(hw); unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz; @@ -283,14 +283,14 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate, DRM_DEBUG_DRIVER("\n"); - pll_in_khz = (unsigned int)(*parent_rate / 1000); + pll_in_khz = (unsigned int)(req->best_parent_rate / 1000); /* Compute best pll parameters */ idf = 0; ndiv = 0; odf = 0; - ret = dsi_pll_get_params(dsi, pll_in_khz, rate / 1000, + ret = dsi_pll_get_params(dsi, pll_in_khz, req->rate / 1000, &idf, &ndiv, &odf); if (ret) DRM_WARN("Warning dsi_pll_get_params(): bad params\n"); @@ -298,7 +298,9 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate, /* Get the adjusted pll out value */ pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf); - return pll_out_khz * 1000; + req->rate = pll_out_khz * 1000; + + return 0; } static int dw_mipi_dsi_clk_set_rate(struct clk_hw *hw, unsigned long rate, @@ -351,7 +353,7 @@ static const struct clk_ops dw_mipi_dsi_stm_clk_ops = { .disable = dw_mipi_dsi_clk_disable, .is_enabled = dw_mipi_dsi_clk_is_enabled, .recalc_rate = dw_mipi_dsi_clk_recalc_rate, - .round_rate = dw_mipi_dsi_clk_round_rate, + .determine_rate = dw_mipi_dsi_clk_determine_rate, .set_rate = dw_mipi_dsi_clk_set_rate, }; diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index d1501e86a5b1..f7e847cfa38f 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ 
b/drivers/gpu/drm/stm/ltdc.c @@ -34,6 +34,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c index 07788e8d3d83..fe38c0984b2b 100644 --- a/drivers/gpu/drm/stm/lvds.c +++ b/drivers/gpu/drm/stm/lvds.c @@ -682,8 +682,8 @@ static unsigned long lvds_pixel_clk_recalc_rate(struct clk_hw *hw, return (unsigned long)lvds->pixel_clock_rate; } -static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int lvds_pixel_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct stm_lvds *lvds = container_of(hw, struct stm_lvds, lvds_ck_px); unsigned int pll_in_khz, bdiv = 0, mdiv = 0, ndiv = 0; @@ -703,7 +703,7 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate, mode = list_first_entry(&connector->modes, struct drm_display_mode, head); - pll_in_khz = (unsigned int)(*parent_rate / 1000); + pll_in_khz = (unsigned int)(req->best_parent_rate / 1000); if (lvds_is_dual_link(lvds->link_type)) multiplier = 2; @@ -719,14 +719,16 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate, lvds->pixel_clock_rate = (unsigned long)pll_get_clkout_khz(pll_in_khz, bdiv, mdiv, ndiv) * 1000 * multiplier / 7; - return lvds->pixel_clock_rate; + req->rate = lvds->pixel_clock_rate; + + return 0; } static const struct clk_ops lvds_pixel_clk_ops = { .enable = lvds_pixel_clk_enable, .disable = lvds_pixel_clk_disable, .recalc_rate = lvds_pixel_clk_recalc_rate, - .round_rate = lvds_pixel_clk_round_rate, + .determine_rate = lvds_pixel_clk_determine_rate, }; static const struct clk_init_data clk_data = { diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 2dded3b828df..40405a52a073 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -23,6 +23,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "sun4i_backend.h" diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index c11dfb2739fa..8a409eee1dca 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -22,6 +22,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_module.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c index 5ab1604f12dd..5e9c4b97c84c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_frontend.c +++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c @@ -19,6 +19,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_plane.h> +#include <drm/drm_print.h> #include "sun4i_drv.h" #include "sun4i_frontend.h" diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c index 12430b9d4e93..b1beadb9bb59 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c @@ -59,13 +59,15 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate, return best_rate; } -static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int 
sun4i_ddc_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct sun4i_ddc *ddc = hw_to_ddc(hw); - return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div, - ddc->m_offset, NULL, NULL); + req->rate = sun4i_ddc_calc_divider(req->rate, req->best_parent_rate, + ddc->pre_div, ddc->m_offset, NULL, NULL); + + return 0; } static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw, @@ -101,7 +103,7 @@ static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops sun4i_ddc_ops = { .recalc_rate = sun4i_ddc_recalc_rate, - .round_rate = sun4i_ddc_round_rate, + .determine_rate = sun4i_ddc_determine_rate, .set_rate = sun4i_ddc_set_rate, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c index 03d7de1911cd..4afb12bd5281 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c @@ -67,8 +67,8 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw, return parent_rate / val; } -static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int sun4i_dclk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct sun4i_dclk *dclk = hw_to_dclk(hw); struct sun4i_tcon *tcon = dclk->tcon; @@ -77,7 +77,7 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, int i; for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) { - u64 ideal = (u64)rate * i; + u64 ideal = (u64)req->rate * i; unsigned long rounded; /* @@ -99,17 +99,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, goto out; } - if (abs(rate - rounded / i) < - abs(rate - best_parent / best_div)) { + if (abs(req->rate - rounded / i) < + abs(req->rate - best_parent / best_div)) { best_parent = rounded; best_div = i; } } out: - *parent_rate = best_parent; + req->best_parent_rate = best_parent; - return best_parent / best_div; + req->rate = best_parent / best_div; + + return 0; } static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, @@ -155,7 +157,7 @@ static const struct clk_ops sun4i_dclk_ops = { .is_enabled = sun4i_dclk_is_enabled, .recalc_rate = sun4i_dclk_recalc_rate, - .round_rate = sun4i_dclk_round_rate, + .determine_rate = sun4i_dclk_determine_rate, .set_rate = sun4i_dclk_set_rate, .get_phase = sun4i_dclk_get_phase, diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c index c100d29b1a89..ce81c12f511d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.c +++ b/drivers/gpu/drm/sun4i/sun8i_csc.c @@ -3,11 +3,20 @@ * Copyright (C) Jernej Skrabec <jernej.skrabec@siol.net> */ +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_plane.h> #include <drm/drm_print.h> #include "sun8i_csc.h" #include "sun8i_mixer.h" +enum sun8i_csc_mode { + SUN8I_CSC_MODE_OFF, + SUN8I_CSC_MODE_YUV2RGB, + SUN8I_CSC_MODE_YVU2RGB, +}; + static const u32 ccsc_base[][2] = { [CCSC_MIXER0_LAYOUT] = {CCSC00_OFFSET, CCSC01_OFFSET}, [CCSC_MIXER1_LAYOUT] = {CCSC10_OFFSET, CCSC11_OFFSET}, @@ -107,23 +116,28 @@ static const u32 yuv2rgb_de3[2][3][12] = { }, }; -static void sun8i_csc_set_coefficients(struct regmap *map, u32 base, - enum sun8i_csc_mode mode, - enum drm_color_encoding encoding, - enum drm_color_range range) +static void sun8i_csc_setup(struct regmap *map, u32 base, + enum sun8i_csc_mode mode, + enum drm_color_encoding encoding, + enum drm_color_range range) { + u32 base_reg, val; const u32 *table; - u32 base_reg; int i; table = 
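The STM DSI/LVDS and sun4i DDC/dotclock hunks above all apply the same clk framework conversion: the .round_rate callback, which returned a long and wrote the chosen parent rate back through *parent_rate, becomes .determine_rate, which edits a struct clk_rate_request in place and returns 0 on success. A minimal sketch of that conversion for a hypothetical foo clock that rounds down to 1 kHz steps:

#include <linux/clk-provider.h>
#include <linux/math.h>

/* Old style, shown for contrast only. */
static long __maybe_unused foo_clk_round_rate(struct clk_hw *hw, unsigned long rate,
					      unsigned long *parent_rate)
{
	return rounddown(rate, 1000);
}

/* New style: adjust req->rate (and req->best_parent_rate if needed). */
static int foo_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	req->rate = rounddown(req->rate, 1000);

	return 0;
}

static const struct clk_ops foo_clk_ops = {
	.determine_rate	= foo_clk_determine_rate,
};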
yuv2rgb[range][encoding]; switch (mode) { + case SUN8I_CSC_MODE_OFF: + val = 0; + break; case SUN8I_CSC_MODE_YUV2RGB: + val = SUN8I_CSC_CTRL_EN; base_reg = SUN8I_CSC_COEFF(base, 0); regmap_bulk_write(map, base_reg, table, 12); break; case SUN8I_CSC_MODE_YVU2RGB: + val = SUN8I_CSC_CTRL_EN; for (i = 0; i < 12; i++) { if ((i & 3) == 1) base_reg = SUN8I_CSC_COEFF(base, i + 1); @@ -135,28 +149,37 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base, } break; default: + val = 0; DRM_WARN("Wrong CSC mode specified.\n"); return; } + + regmap_write(map, SUN8I_CSC_CTRL(base), val); } -static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer, - enum sun8i_csc_mode mode, - enum drm_color_encoding encoding, - enum drm_color_range range) +static void sun8i_de3_ccsc_setup(struct regmap *map, int layer, + enum sun8i_csc_mode mode, + enum drm_color_encoding encoding, + enum drm_color_range range) { + u32 addr, val, mask; const u32 *table; - u32 addr; int i; + mask = SUN50I_MIXER_BLEND_CSC_CTL_EN(layer); table = yuv2rgb_de3[range][encoding]; switch (mode) { + case SUN8I_CSC_MODE_OFF: + val = 0; + break; case SUN8I_CSC_MODE_YUV2RGB: + val = mask; addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0); regmap_bulk_write(map, addr, table, 12); break; case SUN8I_CSC_MODE_YVU2RGB: + val = mask; for (i = 0; i < 12; i++) { if ((i & 3) == 1) addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, @@ -173,67 +196,53 @@ static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer, } break; default: + val = 0; DRM_WARN("Wrong CSC mode specified.\n"); return; } -} - -static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable) -{ - u32 val; - - if (enable) - val = SUN8I_CSC_CTRL_EN; - else - val = 0; - - regmap_update_bits(map, SUN8I_CSC_CTRL(base), SUN8I_CSC_CTRL_EN, val); -} - -static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable) -{ - u32 val, mask; - - mask = SUN50I_MIXER_BLEND_CSC_CTL_EN(layer); - - if (enable) - val = mask; - else - val = 0; regmap_update_bits(map, SUN50I_MIXER_BLEND_CSC_CTL(DE3_BLD_BASE), mask, val); } -void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer, - enum sun8i_csc_mode mode, - enum drm_color_encoding encoding, - enum drm_color_range range) +static u32 sun8i_csc_get_mode(struct drm_plane_state *state) { - u32 base; + const struct drm_format_info *format; - if (mixer->cfg->de_type == SUN8I_MIXER_DE3) { - sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer, - mode, encoding, range); - return; - } + if (!state->crtc || !state->visible) + return SUN8I_CSC_MODE_OFF; - base = ccsc_base[mixer->cfg->ccsc][layer]; + format = state->fb->format; + if (!format->is_yuv) + return SUN8I_CSC_MODE_OFF; - sun8i_csc_set_coefficients(mixer->engine.regs, base, - mode, encoding, range); + switch (format->format) { + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YVU444: + return SUN8I_CSC_MODE_YVU2RGB; + default: + return SUN8I_CSC_MODE_YUV2RGB; + } } -void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable) +void sun8i_csc_config(struct sun8i_layer *layer, + struct drm_plane_state *state) { + u32 mode = sun8i_csc_get_mode(state); u32 base; - if (mixer->cfg->de_type == SUN8I_MIXER_DE3) { - sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable); + if (layer->cfg->de_type == SUN8I_MIXER_DE3) { + sun8i_de3_ccsc_setup(layer->regs, layer->channel, + mode, state->color_encoding, + state->color_range); return; } - base = 
ccsc_base[mixer->cfg->ccsc][layer]; + base = ccsc_base[layer->cfg->ccsc][layer->channel]; - sun8i_csc_enable(mixer->engine.regs, base, enable); + sun8i_csc_setup(layer->regs, base, + mode, state->color_encoding, + state->color_range); } diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h index 828b86fd0cab..2a4b79599610 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.h +++ b/drivers/gpu/drm/sun4i/sun8i_csc.h @@ -8,7 +8,8 @@ #include <drm/drm_color_mgmt.h> -struct sun8i_mixer; +struct drm_plane_state; +struct sun8i_layer; /* VI channel CSC units offsets */ #define CCSC00_OFFSET 0xAA050 @@ -22,16 +23,7 @@ struct sun8i_mixer; #define SUN8I_CSC_CTRL_EN BIT(0) -enum sun8i_csc_mode { - SUN8I_CSC_MODE_OFF, - SUN8I_CSC_MODE_YUV2RGB, - SUN8I_CSC_MODE_YVU2RGB, -}; - -void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer, - enum sun8i_csc_mode mode, - enum drm_color_encoding encoding, - enum drm_color_range range); -void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable); +void sun8i_csc_config(struct sun8i_layer *layer, + struct drm_plane_state *state); #endif diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 31a8409b98f4..ce9c155bfad7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -21,6 +21,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "sun4i_drv.h" @@ -250,24 +251,6 @@ int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format) return -EINVAL; } -static void sun8i_layer_enable(struct sun8i_layer *layer, bool enable) -{ - u32 ch_base = sun8i_channel_base(layer->mixer, layer->channel); - u32 val, reg, mask; - - if (layer->type == SUN8I_LAYER_TYPE_UI) { - val = enable ? SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN : 0; - mask = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN; - reg = SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, layer->overlay); - } else { - val = enable ? SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN : 0; - mask = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN; - reg = SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, layer->overlay); - } - - regmap_update_bits(layer->mixer->engine.regs, reg, mask, val); -} - static void sun8i_mixer_commit(struct sunxi_engine *engine, struct drm_crtc *crtc, struct drm_atomic_state *state) @@ -283,10 +266,10 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine, drm_for_each_plane(plane, state->dev) { struct sun8i_layer *layer = plane_to_sun8i_layer(plane); + int w, h, x, y, zpos; bool enable; - int zpos; - if (!(plane->possible_crtcs & drm_crtc_mask(crtc)) || layer->mixer != mixer) + if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) continue; plane_state = drm_atomic_get_new_plane_state(state, plane); @@ -295,23 +278,28 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine, enable = plane_state->crtc && plane_state->visible; zpos = plane_state->normalized_zpos; + x = plane_state->dst.x1; + y = plane_state->dst.y1; + w = drm_rect_width(&plane_state->dst); + h = drm_rect_height(&plane_state->dst); - DRM_DEBUG_DRIVER(" plane %d: chan=%d ovl=%d en=%d zpos=%d\n", - plane->base.id, layer->channel, layer->overlay, - enable, zpos); - - /* - * We always update the layer enable bit, because it can clear - * spontaneously for unknown reasons. 
- */ - sun8i_layer_enable(layer, enable); + DRM_DEBUG_DRIVER(" plane %d: chan=%d ovl=%d en=%d zpos=%d x=%d y=%d w=%d h=%d\n", + plane->base.id, layer->index, layer->overlay, + enable, zpos, x, y, w, h); if (!enable) continue; /* Route layer to pipe based on zpos */ - route |= layer->channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos); + route |= layer->index << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos); pipe_en |= SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos); + + regmap_write(bld_regs, + SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos), + SUN8I_MIXER_COORD(x, y)); + regmap_write(bld_regs, + SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos), + SUN8I_MIXER_SIZE(w, h)); } regmap_write(bld_regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route); @@ -328,18 +316,30 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm, { struct drm_plane **planes; struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine); + int plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num; + enum drm_plane_type type; + unsigned int phy_index; int i; - planes = devm_kcalloc(drm->dev, - mixer->cfg->vi_num + mixer->cfg->ui_num + 1, - sizeof(*planes), GFP_KERNEL); + planes = devm_kcalloc(drm->dev, plane_cnt, sizeof(*planes), GFP_KERNEL); if (!planes) return ERR_PTR(-ENOMEM); for (i = 0; i < mixer->cfg->vi_num; i++) { struct sun8i_layer *layer; - layer = sun8i_vi_layer_init_one(drm, mixer, i); + if (i == 0 && !mixer->cfg->ui_num) + type = DRM_PLANE_TYPE_PRIMARY; + else + type = DRM_PLANE_TYPE_OVERLAY; + + phy_index = i; + if (mixer->cfg->de_type == SUN8I_MIXER_DE33) + phy_index = mixer->cfg->map[i]; + + layer = sun8i_vi_layer_init_one(drm, type, mixer->engine.regs, + i, phy_index, plane_cnt, + &mixer->cfg->lay_cfg); if (IS_ERR(layer)) { dev_err(drm->dev, "Couldn't initialize overlay plane\n"); @@ -350,16 +350,28 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm, } for (i = 0; i < mixer->cfg->ui_num; i++) { + unsigned int index = mixer->cfg->vi_num + i; struct sun8i_layer *layer; - layer = sun8i_ui_layer_init_one(drm, mixer, i); + if (i == 0) + type = DRM_PLANE_TYPE_PRIMARY; + else + type = DRM_PLANE_TYPE_OVERLAY; + + phy_index = index; + if (mixer->cfg->de_type == SUN8I_MIXER_DE33) + phy_index = mixer->cfg->map[index]; + + layer = sun8i_ui_layer_init_one(drm, type, mixer->engine.regs, + index, phy_index, plane_cnt, + &mixer->cfg->lay_cfg); if (IS_ERR(layer)) { dev_err(drm->dev, "Couldn't initialize %s plane\n", i ? 
"overlay" : "primary"); return ERR_CAST(layer); } - planes[mixer->cfg->vi_num + i] = &layer->plane; + planes[index] = &layer->plane; } return planes; @@ -692,119 +704,173 @@ static void sun8i_mixer_remove(struct platform_device *pdev) } static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = { - .ccsc = CCSC_MIXER0_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_MIXER0_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + .scaler_mask = 0xf, + .scanline_yuv = 2048, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, - .scaler_mask = 0xf, - .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = { - .ccsc = CCSC_MIXER1_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_MIXER1_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + .scaler_mask = 0x3, + .scanline_yuv = 2048, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, - .scaler_mask = 0x3, - .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = { - .ccsc = CCSC_MIXER0_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_MIXER0_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + .scaler_mask = 0xf, + .scanline_yuv = 2048, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, .mod_rate = 432000000, - .scaler_mask = 0xf, - .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = { - .ccsc = CCSC_MIXER0_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_MIXER0_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + .scaler_mask = 0xf, + .scanline_yuv = 2048, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, .mod_rate = 297000000, - .scaler_mask = 0xf, - .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = { - .ccsc = CCSC_MIXER1_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_MIXER1_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + .scaler_mask = 0x3, + .scanline_yuv = 2048, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, .mod_rate = 297000000, - .scaler_mask = 0x3, - .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { - .de_type = SUN8I_MIXER_DE2, - .vi_num = 2, - .ui_num = 1, - .scaler_mask = 0x3, - .scanline_yuv = 2048, - .ccsc = CCSC_MIXER0_LAYOUT, - .mod_rate = 150000000, + .lay_cfg = { + .ccsc = CCSC_MIXER0_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 2, + .scaler_mask = 0x3, + .scanline_yuv = 2048, + }, + .de_type = SUN8I_MIXER_DE2, + .mod_rate = 150000000, + .vi_num = 2, + .ui_num = 1, }; static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = { - .ccsc = CCSC_D1_MIXER0_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_D1_MIXER0_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + .scaler_mask = 0x3, + .scanline_yuv = 2048, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, .mod_rate = 297000000, - .scaler_mask = 0x3, - .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = { - .ccsc = CCSC_MIXER1_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_MIXER1_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + .scaler_mask = 0x1, + .scanline_yuv = 1024, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, .mod_rate = 297000000, - .scaler_mask = 0x1, - .scanline_yuv = 1024, .ui_num = 0, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = { - .ccsc = CCSC_MIXER0_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_MIXER0_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + 
.scaler_mask = 0xf, + .scanline_yuv = 4096, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, .mod_rate = 297000000, - .scaler_mask = 0xf, - .scanline_yuv = 4096, .ui_num = 3, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = { - .ccsc = CCSC_MIXER1_LAYOUT, + .lay_cfg = { + .ccsc = CCSC_MIXER1_LAYOUT, + .de_type = SUN8I_MIXER_DE2, + .vi_scaler_num = 1, + .scaler_mask = 0x3, + .scanline_yuv = 2048, + .de2_fcc_alpha = 1, + }, .de_type = SUN8I_MIXER_DE2, .mod_rate = 297000000, - .scaler_mask = 0x3, - .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = { - .ccsc = CCSC_MIXER0_LAYOUT, + .lay_cfg = { + .de_type = SUN8I_MIXER_DE3, + .vi_scaler_num = 1, + .scaler_mask = 0xf, + .scanline_yuv = 4096, + }, .de_type = SUN8I_MIXER_DE3, .mod_rate = 600000000, - .scaler_mask = 0xf, - .scanline_yuv = 4096, .ui_num = 3, .vi_num = 1, }; static const struct sun8i_mixer_cfg sun50i_h616_mixer0_cfg = { - .ccsc = CCSC_MIXER0_LAYOUT, + .lay_cfg = { + .de_type = SUN8I_MIXER_DE33, + .scaler_mask = 0xf, + .scanline_yuv = 4096, + }, .de_type = SUN8I_MIXER_DE33, .mod_rate = 600000000, - .scaler_mask = 0xf, - .scanline_yuv = 4096, .ui_num = 3, .vi_num = 1, .map = {0, 6, 7, 8}, diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index a1c1cbccc654..e2f83301aae8 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -39,6 +39,9 @@ #define DE3_CH_BASE 0x1000 #define DE3_CH_SIZE 0x0800 +#define DE33_CH_BASE 0x1000 +#define DE33_CH_SIZE 0x20000 + #define SUN8I_MIXER_BLEND_PIPE_CTL(base) ((base) + 0) #define SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, x) ((base) + 0x4 + 0x10 * (x)) #define SUN8I_MIXER_BLEND_ATTR_INSIZE(base, x) ((base) + 0x8 + 0x10 * (x)) @@ -161,29 +164,45 @@ enum sun8i_mixer_type { }; /** - * struct sun8i_mixer_cfg - mixer HW configuration - * @vi_num: number of VI channels - * @ui_num: number of UI channels + * struct sun8i_layer_cfg - layer configuration + * @vi_scaler_num: Number of VI scalers. Used on DE2 and DE3. * @scaler_mask: bitmask which tells which channel supports scaling * First, scaler supports for VI channels is defined and after that, scaler * support for UI channels. For example, if mixer has 2 VI channels without * scaler and 2 UI channels with scaler, bitmask would be 0xC. * @ccsc: select set of CCSC base addresses from the enumeration above. - * @mod_rate: module clock rate that needs to be set in order to have - * a functional block. * @de_type: sun8i_mixer_type enum representing the display engine generation. * @scaline_yuv: size of a scanline for VI scaler for YUV formats. - * @map: channel map for DE variants processing YUV separately (DE33) + * @de2_fcc_alpha: use FCC for missing DE2 VI alpha capability + * Most DE2 cores has FCC. If number of VI planes is one, enable this. */ -struct sun8i_mixer_cfg { - int vi_num; - int ui_num; +struct sun8i_layer_cfg { + unsigned int vi_scaler_num; int scaler_mask; int ccsc; - unsigned long mod_rate; unsigned int de_type; unsigned int scanline_yuv; - unsigned int map[6]; + unsigned int de2_fcc_alpha : 1; +}; + +/** + * struct sun8i_mixer_cfg - mixer HW configuration + * @lay_cfg: layer configuration + * @vi_num: number of VI channels + * @ui_num: number of UI channels + * @de_type: sun8i_mixer_type enum representing the display engine generation. + * @mod_rate: module clock rate that needs to be set in order to have + * a functional block. 
+ * @map: channel map for DE variants processing YUV separately (DE33) + */ + +struct sun8i_mixer_cfg { + struct sun8i_layer_cfg lay_cfg; + int vi_num; + int ui_num; + unsigned int de_type; + unsigned long mod_rate; + unsigned int map[6]; }; struct sun8i_mixer { @@ -206,11 +225,13 @@ enum { }; struct sun8i_layer { - struct drm_plane plane; - struct sun8i_mixer *mixer; - int type; - int channel; - int overlay; + struct drm_plane plane; + int type; + int index; + int channel; + int overlay; + struct regmap *regs; + const struct sun8i_layer_cfg *cfg; }; static inline struct sun8i_layer * @@ -239,14 +260,14 @@ sun8i_blender_regmap(struct sun8i_mixer *mixer) } static inline u32 -sun8i_channel_base(struct sun8i_mixer *mixer, int channel) +sun8i_channel_base(struct sun8i_layer *layer) { - if (mixer->cfg->de_type == SUN8I_MIXER_DE33) - return mixer->cfg->map[channel] * 0x20000 + DE2_CH_SIZE; - else if (mixer->cfg->de_type == SUN8I_MIXER_DE3) - return DE3_CH_BASE + channel * DE3_CH_SIZE; + if (layer->cfg->de_type == SUN8I_MIXER_DE33) + return DE33_CH_BASE + layer->channel * DE33_CH_SIZE; + else if (layer->cfg->de_type == SUN8I_MIXER_DE3) + return DE3_CH_BASE + layer->channel * DE3_CH_SIZE; else - return DE2_CH_BASE + channel * DE2_CH_SIZE; + return DE2_CH_BASE + layer->channel * DE2_CH_SIZE; } int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format); diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c index f97be0040aab..f08f6da55dd0 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c @@ -18,6 +18,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "sun8i_mixer.h" @@ -25,44 +26,49 @@ #include "sun8i_ui_scaler.h" #include "sun8i_vi_scaler.h" -static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane) +static void sun8i_ui_layer_disable(struct sun8i_layer *layer) { - u32 mask, val, ch_base; + u32 ch_base = sun8i_channel_base(layer); - ch_base = sun8i_channel_base(mixer, channel); + regmap_write(layer->regs, + SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, layer->overlay), 0); +} - mask = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK | - SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK; +static void sun8i_ui_layer_update_attributes(struct sun8i_layer *layer, + struct drm_plane *plane) +{ + struct drm_plane_state *state = plane->state; + const struct drm_format_info *fmt; + u32 val, ch_base, hw_fmt; - val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(plane->state->alpha >> 8); + ch_base = sun8i_channel_base(layer); + fmt = state->fb->format; + sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); - val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ? + val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(state->alpha >> 8); + val |= (state->alpha == DRM_BLEND_ALPHA_OPAQUE) ? 
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_PIXEL : SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_COMBINED; + val |= hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET; + val |= SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN; - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay), - mask, val); + regmap_write(layer->regs, + SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, layer->overlay), val); } -static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane, - unsigned int zpos) +static void sun8i_ui_layer_update_coord(struct sun8i_layer *layer, + struct drm_plane *plane) { struct drm_plane_state *state = plane->state; u32 src_w, src_h, dst_w, dst_h; - struct regmap *bld_regs; - u32 bld_base, ch_base; u32 outsize, insize; u32 hphase, vphase; + u32 ch_base; DRM_DEBUG_DRIVER("Updating UI channel %d overlay %d\n", - channel, overlay); + layer->channel, layer->overlay); - bld_base = sun8i_blender_base(mixer); - bld_regs = sun8i_blender_regmap(mixer); - ch_base = sun8i_channel_base(mixer, channel); + ch_base = sun8i_channel_base(layer); src_w = drm_rect_width(&state->src) >> 16; src_h = drm_rect_height(&state->src) >> 16; @@ -79,10 +85,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel, DRM_DEBUG_DRIVER("Layer source offset X: %d Y: %d\n", state->src.x1 >> 16, state->src.y1 >> 16); DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h); - regmap_write(mixer->engine.regs, - SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch_base, overlay), + regmap_write(layer->regs, + SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch_base, layer->overlay), insize); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch_base), insize); @@ -94,67 +100,27 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel, hscale = state->src_w / state->crtc_w; vscale = state->src_h / state->crtc_h; - if (mixer->cfg->de_type == SUN8I_MIXER_DE33) { - sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, - dst_w, dst_h, hscale, vscale, - hphase, vphase, + if (layer->cfg->de_type == SUN8I_MIXER_DE33) { + sun8i_vi_scaler_setup(layer, src_w, src_h, dst_w, dst_h, + hscale, vscale, hphase, vphase, state->fb->format); - sun8i_vi_scaler_enable(mixer, channel, true); + sun8i_vi_scaler_enable(layer, true); } else { - sun8i_ui_scaler_setup(mixer, channel, src_w, src_h, - dst_w, dst_h, hscale, vscale, - hphase, vphase); - sun8i_ui_scaler_enable(mixer, channel, true); + sun8i_ui_scaler_setup(layer, src_w, src_h, dst_w, dst_h, + hscale, vscale, hphase, vphase); + sun8i_ui_scaler_enable(layer, true); } } else { DRM_DEBUG_DRIVER("HW scaling is not needed\n"); - if (mixer->cfg->de_type == SUN8I_MIXER_DE33) - sun8i_vi_scaler_enable(mixer, channel, false); + if (layer->cfg->de_type == SUN8I_MIXER_DE33) + sun8i_vi_scaler_enable(layer, false); else - sun8i_ui_scaler_enable(mixer, channel, false); + sun8i_ui_scaler_enable(layer, false); } - - /* Set base coordinates */ - DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n", - state->dst.x1, state->dst.y1); - DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h); - regmap_write(bld_regs, - SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos), - SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1)); - regmap_write(bld_regs, - SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos), - outsize); - - return 0; } -static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane) -{ - struct drm_plane_state 
*state = plane->state; - const struct drm_format_info *fmt; - u32 val, ch_base, hw_fmt; - int ret; - - ch_base = sun8i_channel_base(mixer, channel); - - fmt = state->fb->format; - ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); - if (ret || fmt->is_yuv) { - DRM_DEBUG_DRIVER("Invalid format\n"); - return -EINVAL; - } - - val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET; - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay), - SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val); - - return 0; -} - -static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane) +static void sun8i_ui_layer_update_buffer(struct sun8i_layer *layer, + struct drm_plane *plane) { struct drm_plane_state *state = plane->state; struct drm_framebuffer *fb = state->fb; @@ -163,7 +129,7 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel, u32 ch_base; int bpp; - ch_base = sun8i_channel_base(mixer, channel); + ch_base = sun8i_channel_base(layer); /* Get the physical address of the buffer in memory */ gem = drm_fb_dma_get_gem_obj(fb, 0); @@ -180,17 +146,15 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel, /* Set the line width */ DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]); - regmap_write(mixer->engine.regs, - SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay), + regmap_write(layer->regs, + SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, layer->overlay), fb->pitches[0]); DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr); - regmap_write(mixer->engine.regs, - SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay), + regmap_write(layer->regs, + SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, layer->overlay), lower_32_bits(dma_addr)); - - return 0; } static int sun8i_ui_layer_atomic_check(struct drm_plane *plane, @@ -201,20 +165,28 @@ static int sun8i_ui_layer_atomic_check(struct drm_plane *plane, struct sun8i_layer *layer = plane_to_sun8i_layer(plane); struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *crtc_state; - int min_scale, max_scale; + const struct drm_format_info *fmt; + int min_scale, max_scale, ret; + u32 hw_fmt; if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; + fmt = new_plane_state->fb->format; + ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); + if (ret || fmt->is_yuv) { + DRM_DEBUG_DRIVER("Invalid plane format\n"); + return -EINVAL; + } + min_scale = DRM_PLANE_NO_SCALING; max_scale = DRM_PLANE_NO_SCALING; - if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) { + if (layer->cfg->scaler_mask & BIT(layer->channel)) { min_scale = SUN8I_UI_SCALER_SCALE_MIN; max_scale = SUN8I_UI_SCALER_SCALE_MAX; } @@ -232,20 +204,15 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct sun8i_layer *layer = plane_to_sun8i_layer(plane); - unsigned int zpos = new_state->normalized_zpos; - struct sun8i_mixer *mixer = layer->mixer; - if (!new_state->crtc || !new_state->visible) + if (!new_state->crtc || !new_state->visible) { + sun8i_ui_layer_disable(layer); return; + } - sun8i_ui_layer_update_coord(mixer, layer->channel, - layer->overlay, plane, zpos); - sun8i_ui_layer_update_alpha(mixer, layer->channel, - layer->overlay, plane); - sun8i_ui_layer_update_formats(mixer, 
layer->channel, - layer->overlay, plane); - sun8i_ui_layer_update_buffer(mixer, layer->channel, - layer->overlay, plane); + sun8i_ui_layer_update_attributes(layer, plane); + sun8i_ui_layer_update_coord(layer, plane); + sun8i_ui_layer_update_buffer(layer, plane); } static const struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = { @@ -291,21 +258,25 @@ static const uint64_t sun8i_layer_modifiers[] = { }; struct sun8i_layer *sun8i_ui_layer_init_one(struct drm_device *drm, - struct sun8i_mixer *mixer, - int index) + enum drm_plane_type type, + struct regmap *regs, + int index, int phy_index, + int plane_cnt, + const struct sun8i_layer_cfg *cfg) { - enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY; - int channel = mixer->cfg->vi_num + index; struct sun8i_layer *layer; - unsigned int plane_cnt; int ret; layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL); if (!layer) return ERR_PTR(-ENOMEM); - if (index == 0) - type = DRM_PLANE_TYPE_PRIMARY; + layer->type = SUN8I_LAYER_TYPE_UI; + layer->index = index; + layer->channel = phy_index; + layer->overlay = 0; + layer->regs = regs; + layer->cfg = cfg; /* possible crtcs are set later */ ret = drm_universal_plane_init(drm, &layer->plane, 0, @@ -318,15 +289,13 @@ struct sun8i_layer *sun8i_ui_layer_init_one(struct drm_device *drm, return ERR_PTR(ret); } - plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num; - ret = drm_plane_create_alpha_property(&layer->plane); if (ret) { dev_err(drm->dev, "Couldn't add alpha property\n"); return ERR_PTR(ret); } - ret = drm_plane_create_zpos_property(&layer->plane, channel, + ret = drm_plane_create_zpos_property(&layer->plane, index, 0, plane_cnt - 1); if (ret) { dev_err(drm->dev, "Couldn't add zpos property\n"); @@ -334,10 +303,6 @@ struct sun8i_layer *sun8i_ui_layer_init_one(struct drm_device *drm, } drm_plane_helper_add(&layer->plane, &sun8i_ui_layer_helper_funcs); - layer->mixer = mixer; - layer->type = SUN8I_LAYER_TYPE_UI; - layer->channel = channel; - layer->overlay = 0; return layer; } diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h index 83892f6ff211..1581ffc6d4e5 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h +++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h @@ -50,6 +50,9 @@ struct sun8i_mixer; struct sun8i_layer; struct sun8i_layer *sun8i_ui_layer_init_one(struct drm_device *drm, - struct sun8i_mixer *mixer, - int index); + enum drm_plane_type type, + struct regmap *regs, + int index, int phy_index, + int plane_cnt, + const struct sun8i_layer_cfg *cfg); #endif /* _SUN8I_UI_LAYER_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c index 8b7a58e27517..a178da8f532a 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c +++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c @@ -89,18 +89,18 @@ static const u32 lan2coefftab16[240] = { 0x0b1c1603, 0x0d1c1502, 0x0e1d1401, 0x0f1d1301, }; -static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel) +static u32 sun8i_ui_scaler_base(struct sun8i_layer *layer) { - int vi_num = mixer->cfg->vi_num; + int offset = layer->cfg->vi_scaler_num; - if (mixer->cfg->de_type == SUN8I_MIXER_DE3) + if (layer->cfg->de_type == SUN8I_MIXER_DE3) return DE3_VI_SCALER_UNIT_BASE + - DE3_VI_SCALER_UNIT_SIZE * vi_num + - DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num); + DE3_VI_SCALER_UNIT_SIZE * offset + + DE3_UI_SCALER_UNIT_SIZE * (layer->channel - offset); else return DE2_VI_SCALER_UNIT_BASE + - DE2_VI_SCALER_UNIT_SIZE * vi_num + - DE2_UI_SCALER_UNIT_SIZE * (channel - vi_num); + 
DE2_VI_SCALER_UNIT_SIZE * offset + + DE2_UI_SCALER_UNIT_SIZE * (layer->channel - offset); } static int sun8i_ui_scaler_coef_index(unsigned int step) @@ -127,14 +127,11 @@ static int sun8i_ui_scaler_coef_index(unsigned int step) } } -void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable) +void sun8i_ui_scaler_enable(struct sun8i_layer *layer, bool enable) { u32 val, base; - if (WARN_ON(layer < mixer->cfg->vi_num)) - return; - - base = sun8i_ui_scaler_base(mixer, layer); + base = sun8i_ui_scaler_base(layer); if (enable) val = SUN8I_SCALER_GSU_CTRL_EN | @@ -142,10 +139,10 @@ void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable) else val = 0; - regmap_write(mixer->engine.regs, SUN8I_SCALER_GSU_CTRL(base), val); + regmap_write(layer->regs, SUN8I_SCALER_GSU_CTRL(base), val); } -void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer, +void sun8i_ui_scaler_setup(struct sun8i_layer *layer, u32 src_w, u32 src_h, u32 dst_w, u32 dst_h, u32 hscale, u32 vscale, u32 hphase, u32 vphase) { @@ -153,10 +150,7 @@ void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer, int i, offset; u32 base; - if (WARN_ON(layer < mixer->cfg->vi_num)) - return; - - base = sun8i_ui_scaler_base(mixer, layer); + base = sun8i_ui_scaler_base(layer); hphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16; vphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16; @@ -166,22 +160,22 @@ void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer, insize = SUN8I_UI_SCALER_SIZE(src_w, src_h); outsize = SUN8I_UI_SCALER_SIZE(dst_w, dst_h); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_GSU_OUTSIZE(base), outsize); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_GSU_INSIZE(base), insize); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_GSU_HSTEP(base), hscale); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_GSU_VSTEP(base), vscale); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_GSU_HPHASE(base), hphase); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_GSU_VPHASE(base), vphase); offset = sun8i_ui_scaler_coef_index(hscale) * SUN8I_UI_SCALER_COEFF_COUNT; for (i = 0; i < SUN8I_UI_SCALER_COEFF_COUNT; i++) - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_GSU_HCOEFF(base, i), lan2coefftab16[offset + i]); } diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h index 1ef4bd6f2718..872d88a58e7e 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h +++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h @@ -35,8 +35,8 @@ #define SUN8I_SCALER_GSU_CTRL_EN BIT(0) #define SUN8I_SCALER_GSU_CTRL_COEFF_RDY BIT(4) -void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable); -void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer, +void sun8i_ui_scaler_enable(struct sun8i_layer *layer, bool enable); +void sun8i_ui_scaler_setup(struct sun8i_layer *layer, u32 src_w, u32 src_h, u32 dst_w, u32 dst_h, u32 hscale, u32 vscale, u32 hphase, u32 vphase); diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index a09ee4097537..ca3ab59e108d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -11,64 +11,74 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include 
"sun4i_crtc.h" #include "sun8i_csc.h" #include "sun8i_mixer.h" #include "sun8i_vi_layer.h" #include "sun8i_vi_scaler.h" -static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane) +static void sun8i_vi_layer_disable(struct sun8i_layer *layer) { - u32 mask, val, ch_base; + u32 ch_base = sun8i_channel_base(layer); - ch_base = sun8i_channel_base(mixer, channel); + regmap_write(layer->regs, + SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, layer->overlay), 0); +} - if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) { - mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK | - SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK; - val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA - (plane->state->alpha >> 8); +static void sun8i_vi_layer_update_attributes(struct sun8i_layer *layer, + struct drm_plane *plane) +{ + struct drm_plane_state *state = plane->state; + const struct drm_format_info *fmt; + u32 val, ch_base, hw_fmt; - val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ? + ch_base = sun8i_channel_base(layer); + fmt = state->fb->format; + sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); + + val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET; + if (!fmt->is_yuv) + val |= SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE; + val |= SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN; + if (layer->cfg->de_type >= SUN8I_MIXER_DE3) { + val |= SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(state->alpha >> 8); + val |= (state->alpha == DRM_BLEND_ALPHA_OPAQUE) ? SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_PIXEL : SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_COMBINED; + } + + regmap_write(layer->regs, + SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, layer->overlay), val); - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, - overlay), - mask, val); - } else if (mixer->cfg->vi_num == 1) { - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG, - SUN8I_MIXER_FCC_GLOBAL_ALPHA_MASK, - SUN8I_MIXER_FCC_GLOBAL_ALPHA - (plane->state->alpha >> 8)); + if (layer->cfg->de2_fcc_alpha) { + regmap_write(layer->regs, + SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG, + SUN8I_MIXER_FCC_GLOBAL_ALPHA(state->alpha >> 8)); } } -static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane, - unsigned int zpos) +static void sun8i_vi_layer_update_coord(struct sun8i_layer *layer, + struct drm_plane *plane) { struct drm_plane_state *state = plane->state; + struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(state->crtc); + struct sun8i_mixer *mixer = engine_to_sun8i_mixer(scrtc->engine); const struct drm_format_info *format = state->fb->format; u32 src_w, src_h, dst_w, dst_h; - struct regmap *bld_regs; - u32 bld_base, ch_base; u32 outsize, insize; u32 hphase, vphase; u32 hn = 0, hm = 0; u32 vn = 0, vm = 0; bool subsampled; + u32 ch_base; DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n", - channel, overlay); + layer->channel, layer->overlay); - bld_base = sun8i_blender_base(mixer); - bld_regs = sun8i_blender_regmap(mixer); - ch_base = sun8i_channel_base(mixer, channel); + ch_base = sun8i_channel_base(layer); src_w = drm_rect_width(&state->src) >> 16; src_h = drm_rect_height(&state->src) >> 16; @@ -105,10 +115,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, (state->src.x1 >> 16) & ~(format->hsub - 1), (state->src.y1 >> 16) & ~(format->vsub - 1)); DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h); - regmap_write(mixer->engine.regs, - SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch_base, 
overlay), + regmap_write(layer->regs, + SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch_base, layer->overlay), insize); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch_base), insize); @@ -143,7 +153,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, } /* it seems that every RGB scaler has buffer for 2048 pixels */ - scanline = subsampled ? mixer->cfg->scanline_yuv : 2048; + scanline = subsampled ? layer->cfg->scanline_yuv : 2048; if (src_w > scanline) { DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n"); @@ -155,108 +165,34 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, hscale = (src_w << 16) / dst_w; vscale = (src_h << 16) / dst_h; - sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w, - dst_h, hscale, vscale, hphase, vphase, - format); - sun8i_vi_scaler_enable(mixer, channel, true); + sun8i_vi_scaler_setup(layer, src_w, src_h, dst_w, dst_h, + hscale, vscale, hphase, vphase, format); + sun8i_vi_scaler_enable(layer, true); } else { DRM_DEBUG_DRIVER("HW scaling is not needed\n"); - sun8i_vi_scaler_enable(mixer, channel, false); + sun8i_vi_scaler_enable(layer, false); } - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base), SUN8I_MIXER_CHAN_VI_DS_N(hn) | SUN8I_MIXER_CHAN_VI_DS_M(hm)); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base), SUN8I_MIXER_CHAN_VI_DS_N(hn) | SUN8I_MIXER_CHAN_VI_DS_M(hm)); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base), SUN8I_MIXER_CHAN_VI_DS_N(vn) | SUN8I_MIXER_CHAN_VI_DS_M(vm)); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base), SUN8I_MIXER_CHAN_VI_DS_N(vn) | SUN8I_MIXER_CHAN_VI_DS_M(vm)); - - /* Set base coordinates */ - DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n", - state->dst.x1, state->dst.y1); - DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h); - regmap_write(bld_regs, - SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos), - SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1)); - regmap_write(bld_regs, - SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos), - outsize); - - return 0; -} - -static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format) -{ - if (!format->is_yuv) - return SUN8I_CSC_MODE_OFF; - - switch (format->format) { - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YVU444: - return SUN8I_CSC_MODE_YVU2RGB; - default: - return SUN8I_CSC_MODE_YUV2RGB; - } -} - -static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane) -{ - struct drm_plane_state *state = plane->state; - u32 val, ch_base, csc_mode, hw_fmt; - const struct drm_format_info *fmt; - int ret; - - ch_base = sun8i_channel_base(mixer, channel); - - fmt = state->fb->format; - ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); - if (ret) { - DRM_DEBUG_DRIVER("Invalid format\n"); - return ret; - } - - val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET; - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay), - SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val); - - csc_mode = sun8i_vi_layer_get_csc_mode(fmt); - if (csc_mode != SUN8I_CSC_MODE_OFF) { - sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode, - state->color_encoding, - state->color_range); - sun8i_csc_enable_ccsc(mixer, channel, true); - } 
else { - sun8i_csc_enable_ccsc(mixer, channel, false); - } - - if (!fmt->is_yuv) - val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE; - else - val = 0; - - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay), - SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val); - - return 0; } -static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane) +static void sun8i_vi_layer_update_buffer(struct sun8i_layer *layer, + struct drm_plane *plane) { struct drm_plane_state *state = plane->state; struct drm_framebuffer *fb = state->fb; @@ -267,7 +203,7 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel, u32 ch_base; int i; - ch_base = sun8i_channel_base(mixer, channel); + ch_base = sun8i_channel_base(layer); /* Adjust x and y to be dividable by subsampling factor */ src_x = (state->src.x1 >> 16) & ~(format->hsub - 1); @@ -297,21 +233,19 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel, /* Set the line width */ DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n", i + 1, fb->pitches[i]); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch_base, - overlay, i), + layer->overlay, i), fb->pitches[i]); DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n", i + 1, &dma_addr); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base, - overlay, i), + layer->overlay, i), lower_32_bits(dma_addr)); } - - return 0; } static int sun8i_vi_layer_atomic_check(struct drm_plane *plane, @@ -322,20 +256,28 @@ static int sun8i_vi_layer_atomic_check(struct drm_plane *plane, struct sun8i_layer *layer = plane_to_sun8i_layer(plane); struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *crtc_state; - int min_scale, max_scale; + const struct drm_format_info *fmt; + int min_scale, max_scale, ret; + u32 hw_fmt; if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; + fmt = new_plane_state->fb->format; + ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); + if (ret) { + DRM_DEBUG_DRIVER("Invalid plane format\n"); + return ret; + } + min_scale = DRM_PLANE_NO_SCALING; max_scale = DRM_PLANE_NO_SCALING; - if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) { + if (layer->cfg->scaler_mask & BIT(layer->channel)) { min_scale = SUN8I_VI_SCALER_SCALE_MIN; max_scale = SUN8I_VI_SCALER_SCALE_MAX; } @@ -352,20 +294,16 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct sun8i_layer *layer = plane_to_sun8i_layer(plane); - unsigned int zpos = new_state->normalized_zpos; - struct sun8i_mixer *mixer = layer->mixer; - if (!new_state->crtc || !new_state->visible) + if (!new_state->crtc || !new_state->visible) { + sun8i_vi_layer_disable(layer); return; + } - sun8i_vi_layer_update_coord(mixer, layer->channel, - layer->overlay, plane, zpos); - sun8i_vi_layer_update_alpha(mixer, layer->channel, - layer->overlay, plane); - sun8i_vi_layer_update_formats(mixer, layer->channel, - layer->overlay, plane); - sun8i_vi_layer_update_buffer(mixer, layer->channel, - layer->overlay, plane); + sun8i_vi_layer_update_attributes(layer, plane); + sun8i_vi_layer_update_coord(layer, plane); + sun8i_csc_config(layer, new_state); + sun8i_vi_layer_update_buffer(layer, plane); 
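+ /* Note (grounded in the hunks above): sun8i_vi_layer_update_attributes() rewrites the whole ATTR register with the EN bit set, so a plane previously cleared by sun8i_vi_layer_disable() is re-enabled again by this update path. */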
} static const struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = { @@ -471,12 +409,14 @@ static const uint64_t sun8i_layer_modifiers[] = { }; struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm, - struct sun8i_mixer *mixer, - int index) + enum drm_plane_type type, + struct regmap *regs, + int index, int phy_index, + int plane_cnt, + const struct sun8i_layer_cfg *cfg) { - enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY; u32 supported_encodings, supported_ranges; - unsigned int plane_cnt, format_count; + unsigned int format_count; struct sun8i_layer *layer; const u32 *formats; int ret; @@ -485,7 +425,14 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm, if (!layer) return ERR_PTR(-ENOMEM); - if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) { + layer->type = SUN8I_LAYER_TYPE_VI; + layer->index = index; + layer->channel = phy_index; + layer->overlay = 0; + layer->regs = regs; + layer->cfg = cfg; + + if (layer->cfg->de_type >= SUN8I_MIXER_DE3) { formats = sun8i_vi_layer_de3_formats; format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats); } else { @@ -493,9 +440,6 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm, format_count = ARRAY_SIZE(sun8i_vi_layer_formats); } - if (!mixer->cfg->ui_num && index == 0) - type = DRM_PLANE_TYPE_PRIMARY; - /* possible crtcs are set later */ ret = drm_universal_plane_init(drm, &layer->plane, 0, &sun8i_vi_layer_funcs, @@ -507,9 +451,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm, return ERR_PTR(ret); } - plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num; - - if (mixer->cfg->vi_num == 1 || mixer->cfg->de_type >= SUN8I_MIXER_DE3) { + if (layer->cfg->de2_fcc_alpha || layer->cfg->de_type >= SUN8I_MIXER_DE3) { ret = drm_plane_create_alpha_property(&layer->plane); if (ret) { dev_err(drm->dev, "Couldn't add alpha property\n"); @@ -526,7 +468,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm, supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709); - if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) + if (layer->cfg->de_type >= SUN8I_MIXER_DE3) supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020); supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | @@ -543,10 +485,6 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm, } drm_plane_helper_add(&layer->plane, &sun8i_vi_layer_helper_funcs); - layer->mixer = mixer; - layer->type = SUN8I_LAYER_TYPE_VI; - layer->channel = index; - layer->overlay = 0; return layer; } diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h index 655440cdc78f..29cc5573691f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h @@ -55,6 +55,9 @@ struct sun8i_mixer; struct sun8i_layer; struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm, - struct sun8i_mixer *mixer, - int index); + enum drm_plane_type type, + struct regmap *regs, + int index, int phy_index, + int plane_cnt, + const struct sun8i_layer_cfg *cfg); #endif /* _SUN8I_VI_LAYER_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c index 82df6244af88..3dec4eeb1ba2 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c @@ -833,16 +833,17 @@ static const u32 bicubic4coefftab32[480] = { 0x1012110d, 0x1012110d, 0x1013110c, 0x1013110c, }; -static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel) +static u32 sun8i_vi_scaler_base(struct sun8i_layer *layer) { - 
if (mixer->cfg->de_type == SUN8I_MIXER_DE33) - return sun8i_channel_base(mixer, channel) + 0x3000; - else if (mixer->cfg->de_type == SUN8I_MIXER_DE3) + if (layer->cfg->de_type == SUN8I_MIXER_DE33) + return DE33_VI_SCALER_UNIT_BASE + + DE33_CH_SIZE * layer->channel; + else if (layer->cfg->de_type == SUN8I_MIXER_DE3) return DE3_VI_SCALER_UNIT_BASE + - DE3_VI_SCALER_UNIT_SIZE * channel; + DE3_VI_SCALER_UNIT_SIZE * layer->channel; else return DE2_VI_SCALER_UNIT_BASE + - DE2_VI_SCALER_UNIT_SIZE * channel; + DE2_VI_SCALER_UNIT_SIZE * layer->channel; } static int sun8i_vi_scaler_coef_index(unsigned int step) @@ -909,11 +910,11 @@ static void sun8i_vi_scaler_set_coeff(struct regmap *map, u32 base, } } -void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable) +void sun8i_vi_scaler_enable(struct sun8i_layer *layer, bool enable) { u32 val, base; - base = sun8i_vi_scaler_base(mixer, layer); + base = sun8i_vi_scaler_base(layer); if (enable) val = SUN8I_SCALER_VSU_CTRL_EN | @@ -921,11 +922,11 @@ void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable) else val = 0; - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_CTRL(base), val); } -void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer, +void sun8i_vi_scaler_setup(struct sun8i_layer *layer, u32 src_w, u32 src_h, u32 dst_w, u32 dst_h, u32 hscale, u32 vscale, u32 hphase, u32 vphase, const struct drm_format_info *format) @@ -934,7 +935,7 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer, u32 insize, outsize; u32 base; - base = sun8i_vi_scaler_base(mixer, layer); + base = sun8i_vi_scaler_base(layer); hphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16; vphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16; @@ -958,7 +959,7 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer, cvphase = vphase; } - if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) { + if (layer->cfg->de_type >= SUN8I_MIXER_DE3) { u32 val; if (format->hsub == 1 && format->vsub == 1) @@ -966,36 +967,36 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer, else val = SUN50I_SCALER_VSU_SCALE_MODE_NORMAL; - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN50I_SCALER_VSU_SCALE_MODE(base), val); } - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_OUTSIZE(base), outsize); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_YINSIZE(base), insize); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_YHSTEP(base), hscale); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_YVSTEP(base), vscale); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_YHPHASE(base), hphase); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_YVPHASE(base), vphase); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_CINSIZE(base), SUN8I_VI_SCALER_SIZE(src_w / format->hsub, src_h / format->vsub)); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_CHSTEP(base), hscale / format->hsub); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_CVSTEP(base), vscale / format->vsub); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_CHPHASE(base), chphase); - regmap_write(mixer->engine.regs, + regmap_write(layer->regs, SUN8I_SCALER_VSU_CVPHASE(base), cvphase); - sun8i_vi_scaler_set_coeff(mixer->engine.regs, base, + 
sun8i_vi_scaler_set_coeff(layer->regs, base, hscale, vscale, format); } diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h index 68f6593b369a..245fe2f431c3 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h +++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h @@ -18,6 +18,8 @@ #define DE3_VI_SCALER_UNIT_BASE 0x20000 #define DE3_VI_SCALER_UNIT_SIZE 0x08000 +#define DE33_VI_SCALER_UNIT_BASE 0x4000 + /* this two macros assumes 16 fractional bits which is standard in DRM */ #define SUN8I_VI_SCALER_SCALE_MIN 1 #define SUN8I_VI_SCALER_SCALE_MAX ((1UL << 20) - 1) @@ -69,8 +71,8 @@ #define SUN50I_SCALER_VSU_ANGLE_SHIFT(x) (((x) << 16) & 0xF) #define SUN50I_SCALER_VSU_ANGLE_OFFSET(x) ((x) & 0xFF) -void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable); -void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer, +void sun8i_vi_scaler_enable(struct sun8i_layer *layer, bool enable); +void sun8i_vi_scaler_setup(struct sun8i_layer *layer, u32 src_w, u32 src_h, u32 dst_w, u32 dst_h, u32 hscale, u32 vscale, u32 hphase, u32 vphase, const struct drm_format_info *format); diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h index 89633e30ca62..da670d7eeb2e 100644 --- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h +++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h @@ -10,12 +10,19 @@ #include <drm/drm_crtc.h> #include <drm/drm_device.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_modes.h> struct drm_format_info; struct drm_scanout_buffer; struct screen_info; +typedef void (*drm_sysfb_blit_func)(struct iosys_map *, const unsigned int *, + const struct iosys_map *, + const struct drm_framebuffer *, + const struct drm_rect *, + struct drm_format_conv_state *); + /* * Input parsing */ @@ -93,10 +100,25 @@ static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *de * Plane */ +struct drm_sysfb_plane_state { + struct drm_shadow_plane_state base; + + /* transfers framebuffer data to scanout buffer in CRTC format */ + drm_sysfb_blit_func blit_to_crtc; +}; + +static inline struct drm_sysfb_plane_state * +to_drm_sysfb_plane_state(struct drm_plane_state *base) +{ + return container_of(to_drm_shadow_plane_state(base), struct drm_sysfb_plane_state, base); +} + size_t drm_sysfb_build_fourcc_list(struct drm_device *dev, const u32 *native_fourccs, size_t native_nfourccs, u32 *fourccs_out, size_t nfourccs_out); +int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane, + struct drm_plane_state *plane_state); int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *new_state); void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, @@ -114,16 +136,24 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane, DRM_FORMAT_MOD_INVALID #define DRM_SYSFB_PLANE_HELPER_FUNCS \ - DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \ + .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access, \ + .end_fb_access = drm_gem_end_shadow_fb_access, \ .atomic_check = drm_sysfb_plane_helper_atomic_check, \ .atomic_update = drm_sysfb_plane_helper_atomic_update, \ .atomic_disable = drm_sysfb_plane_helper_atomic_disable, \ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer +void drm_sysfb_plane_reset(struct drm_plane *plane); +struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane); +void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *plane_state); + #define 
DRM_SYSFB_PLANE_FUNCS \ + .reset = drm_sysfb_plane_reset, \ .update_plane = drm_atomic_helper_update_plane, \ .disable_plane = drm_atomic_helper_disable_plane, \ - DRM_GEM_SHADOW_PLANE_FUNCS + .atomic_duplicate_state = drm_sysfb_plane_atomic_duplicate_state, \ + .atomic_destroy_state = drm_sysfb_plane_atomic_destroy_state /* * CRTC diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c index ddb4a7523ee6..6214b7709b37 100644 --- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c +++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c @@ -11,7 +11,6 @@ #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> -#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_panic.h> #include <drm/drm_print.h> @@ -185,6 +184,104 @@ size_t drm_sysfb_build_fourcc_list(struct drm_device *dev, } EXPORT_SYMBOL(drm_sysfb_build_fourcc_list); +static void drm_sysfb_plane_state_destroy(struct drm_sysfb_plane_state *sysfb_plane_state) +{ + __drm_gem_destroy_shadow_plane_state(&sysfb_plane_state->base); + + kfree(sysfb_plane_state); +} + +static void drm_sysfb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state) +{ + drm_fb_memcpy(dst, dst_pitch, src, fb, clip); +} + +static drm_sysfb_blit_func drm_sysfb_get_blit_func(u32 dst_format, u32 src_format) +{ + if (src_format == dst_format) { + return drm_sysfb_memcpy; + } else if (src_format == DRM_FORMAT_XRGB8888) { + switch (dst_format) { + case DRM_FORMAT_RGB565: + return drm_fb_xrgb8888_to_rgb565; + case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN: + return drm_fb_xrgb8888_to_rgb565be; + case DRM_FORMAT_XRGB1555: + return drm_fb_xrgb8888_to_xrgb1555; + case DRM_FORMAT_ARGB1555: + return drm_fb_xrgb8888_to_argb1555; + case DRM_FORMAT_RGBA5551: + return drm_fb_xrgb8888_to_rgba5551; + case DRM_FORMAT_RGB888: + return drm_fb_xrgb8888_to_rgb888; + case DRM_FORMAT_BGR888: + return drm_fb_xrgb8888_to_bgr888; + case DRM_FORMAT_ARGB8888: + return drm_fb_xrgb8888_to_argb8888; + case DRM_FORMAT_XBGR8888: + return drm_fb_xrgb8888_to_xbgr8888; + case DRM_FORMAT_ABGR8888: + return drm_fb_xrgb8888_to_abgr8888; + case DRM_FORMAT_XRGB2101010: + return drm_fb_xrgb8888_to_xrgb2101010; + case DRM_FORMAT_ARGB2101010: + return drm_fb_xrgb8888_to_argb2101010; + case DRM_FORMAT_BGRX8888: + return drm_fb_xrgb8888_to_bgrx8888; + case DRM_FORMAT_RGB332: + return drm_fb_xrgb8888_to_rgb332; + } + } + + return NULL; +} + +int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct drm_device *dev = plane->dev; + struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_crtc_state *crtc_state; + struct drm_sysfb_crtc_state *sysfb_crtc_state; + drm_sysfb_blit_func blit_to_crtc; + int ret; + + ret = drm_gem_begin_shadow_fb_access(plane, plane_state); + if (ret) + return ret; + + if (!fb) + return 0; + + ret = -EINVAL; + + crtc_state = drm_atomic_get_new_crtc_state(plane_state->state, plane_state->crtc); + if (drm_WARN_ON_ONCE(dev, !crtc_state)) + goto err_drm_gem_end_shadow_fb_access; + sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); + + if (drm_WARN_ON_ONCE(dev, !sysfb_crtc_state->format)) + goto err_drm_gem_end_shadow_fb_access; + blit_to_crtc = 
drm_sysfb_get_blit_func(sysfb_crtc_state->format->format, + fb->format->format); + if (!blit_to_crtc) { + drm_warn_once(dev, "No blit helper from %p4cc to %p4cc found.\n", + &fb->format->format, &sysfb_crtc_state->format->format); + goto err_drm_gem_end_shadow_fb_access; + } + sysfb_plane_state->blit_to_crtc = blit_to_crtc; + + return 0; + +err_drm_gem_end_shadow_fb_access: + drm_gem_end_shadow_fb_access(plane, plane_state); + return ret; +} +EXPORT_SYMBOL(drm_sysfb_plane_helper_begin_fb_access); + int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *new_state) { @@ -235,12 +332,14 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev); struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); - struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); + struct drm_shadow_plane_state *shadow_plane_state = &sysfb_plane_state->base; struct drm_framebuffer *fb = plane_state->fb; unsigned int dst_pitch = sysfb->fb_pitch; struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); const struct drm_format_info *dst_format = sysfb_crtc_state->format; + drm_sysfb_blit_func blit_to_crtc = sysfb_plane_state->blit_to_crtc; struct drm_atomic_helper_damage_iter iter; struct drm_rect damage; int ret, idx; @@ -261,8 +360,8 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at continue; iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip)); - drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb, - &damage, &shadow_plane_state->fmtcnv_state); + blit_to_crtc(&dst, &dst_pitch, shadow_plane_state->data, fb, &damage, + &shadow_plane_state->fmtcnv_state); } drm_dev_exit(idx); @@ -321,6 +420,52 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane, } EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer); +void drm_sysfb_plane_reset(struct drm_plane *plane) +{ + struct drm_sysfb_plane_state *sysfb_plane_state; + + if (plane->state) + drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane->state)); + + sysfb_plane_state = kzalloc(sizeof(*sysfb_plane_state), GFP_KERNEL); + if (sysfb_plane_state) + __drm_gem_reset_shadow_plane(plane, &sysfb_plane_state->base); + else + __drm_gem_reset_shadow_plane(plane, NULL); +} +EXPORT_SYMBOL(drm_sysfb_plane_reset); + +struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct drm_plane_state *plane_state = plane->state; + struct drm_sysfb_plane_state *sysfb_plane_state; + struct drm_sysfb_plane_state *new_sysfb_plane_state; + struct drm_shadow_plane_state *new_shadow_plane_state; + + if (drm_WARN_ON(dev, !plane_state)) + return NULL; + sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); + + new_sysfb_plane_state = kzalloc(sizeof(*new_sysfb_plane_state), GFP_KERNEL); + if (!new_sysfb_plane_state) + return NULL; + new_shadow_plane_state = &new_sysfb_plane_state->base; + + __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state); + new_sysfb_plane_state->blit_to_crtc = sysfb_plane_state->blit_to_crtc; + + 
return &new_shadow_plane_state->base; +} +EXPORT_SYMBOL(drm_sysfb_plane_atomic_duplicate_state); + +void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane_state)); +} +EXPORT_SYMBOL(drm_sysfb_plane_atomic_destroy_state); + /* * CRTC */ diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c index 1883c4a8604c..1b683d55d6ea 100644 --- a/drivers/gpu/drm/sysfb/efidrm.c +++ b/drivers/gpu/drm/sysfb/efidrm.c @@ -21,6 +21,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <video/edid.h> diff --git a/drivers/gpu/drm/sysfb/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c index 8d8ab39c5f36..d38ba70f4e0d 100644 --- a/drivers/gpu/drm/sysfb/ofdrm.c +++ b/drivers/gpu/drm/sysfb/ofdrm.c @@ -21,6 +21,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "drm_sysfb_helper.h" diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c index 0358164a623c..7a95d2dacd9d 100644 --- a/drivers/gpu/drm/sysfb/simpledrm.c +++ b/drivers/gpu/drm/sysfb/simpledrm.c @@ -2,8 +2,9 @@ #include <linux/aperture.h> #include <linux/clk.h> -#include <linux/of_clk.h> #include <linux/minmax.h> +#include <linux/of_address.h> +#include <linux/of_clk.h> #include <linux/of_reserved_mem.h> #include <linux/platform_data/simplefb.h> #include <linux/platform_device.h> @@ -24,6 +25,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "drm_sysfb_helper.h" diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c index 16a4b52d45c6..7b7b5ba26317 100644 --- a/drivers/gpu/drm/sysfb/vesadrm.c +++ b/drivers/gpu/drm/sysfb/vesadrm.c @@ -22,6 +22,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <video/edid.h> @@ -295,7 +296,8 @@ static int vesadrm_primary_plane_helper_atomic_check(struct drm_plane *plane, } static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = { - DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access, + .end_fb_access = drm_gem_end_shadow_fb_access, .atomic_check = vesadrm_primary_plane_helper_atomic_check, .atomic_update = drm_sysfb_plane_helper_atomic_update, .atomic_disable = drm_sysfb_plane_helper_atomic_disable, diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile index 6fc4b504e786..e399b40d64a1 100644 --- a/drivers/gpu/drm/tegra/Makefile +++ b/drivers/gpu/drm/tegra/Makefile @@ -25,6 +25,7 @@ tegra-drm-y := \ falcon.o \ vic.o \ nvdec.o \ + nvjpg.o \ riscv.o tegra-drm-y += trace.o diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 59d5c1ba145a..01e9d5011dd8 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -27,6 +27,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "dc.h" @@ -1033,7 +1034,7 @@ static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_a int 
min_scale, max_scale; int err; - crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -3148,6 +3149,7 @@ static int tegra_dc_couple(struct tegra_dc *dc) dc->client.parent = &parent->client; dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion)); + put_device(companion); } return 0; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 4596073fe28f..1d18d43292dc 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -22,6 +22,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_ioctl.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) @@ -1383,6 +1384,7 @@ static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra210-sor1", }, { .compatible = "nvidia,tegra210-vic", }, { .compatible = "nvidia,tegra210-nvdec", }, + { .compatible = "nvidia,tegra210-nvjpg", }, { .compatible = "nvidia,tegra186-display", }, { .compatible = "nvidia,tegra186-dc", }, { .compatible = "nvidia,tegra186-sor", }, @@ -1421,6 +1423,7 @@ static struct platform_driver * const drivers[] = { &tegra_gr3d_driver, &tegra_vic_driver, &tegra_nvdec_driver, + &tegra_nvjpg_driver, }; static int __init host1x_drm_init(void) diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 1dd3670f37db..ae68b03d8483 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -214,5 +214,6 @@ extern struct platform_driver tegra_gr2d_driver; extern struct platform_driver tegra_gr3d_driver; extern struct platform_driver tegra_vic_driver; extern struct platform_driver tegra_nvdec_driver; +extern struct platform_driver tegra_nvjpg_driver; #endif /* HOST1X_DRM_H */ diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index b5089b772267..175f5f9937b0 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -22,6 +22,7 @@ #include <drm/drm_file.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "dc.h" @@ -545,12 +546,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, /* horizontal back porch */ hbp = (mode->htotal - mode->hsync_end) * mul / div; - if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0) - hbp += hsw; - /* horizontal front porch */ hfp = (mode->hsync_start - mode->hdisplay) * mul / div; + if (dsi->master || dsi->slave) { + hact /= 2; + hsw /= 2; + hbp /= 2; + hfp /= 2; + } + + if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0) + hbp += hsw; + /* subtract packet overhead */ hsw -= 10; hbp -= 14; @@ -560,11 +568,6 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3); tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5); tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7); - - /* set SOL delay (for non-burst mode only) */ - tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY); - - /* TODO: implement ganged mode */ } else { u16 bytes; @@ -586,29 +589,28 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, value = MIPI_DCS_WRITE_MEMORY_START << 8 | MIPI_DCS_WRITE_MEMORY_CONTINUE; tegra_dsi_writel(dsi, value, DSI_DCS_CMDS); + } - /* set SOL delay */ - if (dsi->master || dsi->slave) { - unsigned long delay, bclk, bclk_ganged; - unsigned int lanes = state->lanes; - - /* SOL 
to valid, valid to FIFO and FIFO write delay */ - delay = 4 + 4 + 2; - delay = DIV_ROUND_UP(delay * mul, div * lanes); - /* FIFO read delay */ - delay = delay + 6; - - bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes); - bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes); - value = bclk - bclk_ganged + delay + 20; - } else { - /* TODO: revisit for non-ganged mode */ - value = 8 * mul / div; - } + /* set SOL delay */ + if (dsi->master || dsi->slave) { + unsigned long delay, bclk, bclk_ganged; + unsigned int lanes = state->lanes; + + /* SOL to valid, valid to FIFO and FIFO write delay */ + delay = 4 + 4 + 2; + delay = DIV_ROUND_UP(delay * mul, div * lanes); + /* FIFO read delay */ + delay = delay + 6; - tegra_dsi_writel(dsi, value, DSI_SOL_DELAY); + bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes); + bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes); + value = bclk - bclk_ganged + delay + 20; + } else { + value = 8 * mul / div; } + tegra_dsi_writel(dsi, value, DSI_SOL_DELAY); + if (dsi->slave) { tegra_dsi_configure(dsi->slave, pipe, mode); @@ -913,15 +915,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) u32 value; int err; - /* If the bootloader enabled DSI it needs to be disabled - * in order for the panel initialization commands to be - * properly sent. - */ - value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); - - if (value & DSI_POWER_CONTROL_ENABLE) - tegra_dsi_disable(dsi); - err = tegra_dsi_prepare(dsi); if (err < 0) { dev_err(dsi->dev, "failed to prepare: %d\n", err); diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index dd041089f797..1cef8c5cac50 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -13,6 +13,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #include "drm.h" #include "gem.h" diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c index 1b70f5e164af..8f40882aa76e 100644 --- a/drivers/gpu/drm/tegra/fbdev.c +++ b/drivers/gpu/drm/tegra/fbdev.c @@ -53,8 +53,6 @@ static void tegra_fbdev_fb_destroy(struct fb_info *info) drm_framebuffer_remove(fb); drm_client_release(&helper->client); - drm_fb_helper_unprepare(helper); - kfree(helper); } static const struct fb_ops tegra_fb_ops = { @@ -75,10 +73,10 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct tegra_drm *tegra = helper->dev->dev_private; struct drm_device *drm = helper->dev; struct drm_mode_fb_cmd2 cmd = { 0 }; + struct fb_info *info = helper->info; unsigned int bytes_per_pixel; struct drm_framebuffer *fb; unsigned long offset; - struct fb_info *info; struct tegra_bo *bo; size_t size; int err; @@ -99,13 +97,6 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, if (IS_ERR(bo)) return PTR_ERR(bo); - info = drm_fb_helper_alloc_info(helper); - if (IS_ERR(info)) { - dev_err(drm->dev, "failed to allocate framebuffer info\n"); - drm_gem_object_put(&bo->gem); - return PTR_ERR(info); - } - fb = tegra_fb_alloc(drm, drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]), &cmd, &bo, 1); diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 8ede07fb7a21..6b14f1e919eb 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -16,6 +16,7 @@ #include <linux/vmalloc.h> #include <drm/drm_drv.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_prime.h> #include "drm.h" @@ -542,12 +543,13 @@ void tegra_bo_free_object(struct drm_gem_object *gem) int 
tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, struct drm_mode_create_dumb *args) { - unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); struct tegra_drm *tegra = drm->dev_private; struct tegra_bo *bo; + int ret; - args->pitch = round_up(min_pitch, tegra->pitch_align); - args->size = args->pitch * args->height; + ret = drm_mode_size_dumb(drm, args, tegra->pitch_align, 0); + if (ret) + return ret; bo = tegra_bo_create_with_handle(file, drm, args->size, 0, &args->handle); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 8cd2969e7d4b..0adcd4244a42 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -28,6 +28,7 @@ #include <drm/drm_eld.h> #include <drm/drm_file.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -658,7 +659,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data, { const u8 *ptr = data; unsigned long offset; - size_t i, j; + size_t i; u32 value; switch (ptr[0]) { @@ -691,7 +692,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data, * - subpack_low: bytes 0 - 3 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) */ - for (i = 3, j = 0; i < size; i += 7, j += 8) { + for (i = 3; i < size; i += 7) { size_t rem = size - i, num = min_t(size_t, rem, 4); value = tegra_hdmi_subpack(&ptr[i], num); diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 8f779f23dc09..c924ffba4094 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -20,6 +20,7 @@ #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "drm.h" diff --git a/drivers/gpu/drm/tegra/nvjpg.c b/drivers/gpu/drm/tegra/nvjpg.c new file mode 100644 index 000000000000..94503fd0d52d --- /dev/null +++ b/drivers/gpu/drm/tegra/nvjpg.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/host1x.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "drm.h" +#include "falcon.h" + +struct nvjpg_config { + const char *firmware; + unsigned int version; +}; + +struct nvjpg { + struct falcon falcon; + + void __iomem *regs; + struct tegra_drm_client client; + struct device *dev; + struct clk *clk; + + /* Platform configuration */ + const struct nvjpg_config *config; +}; + +static inline struct nvjpg *to_nvjpg(struct tegra_drm_client *client) +{ + return container_of(client, struct nvjpg, client); +} + +static int nvjpg_init(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->host); + struct tegra_drm *tegra = dev->dev_private; + struct nvjpg *nvjpg = to_nvjpg(drm); + int err; + + err = host1x_client_iommu_attach(client); + if (err < 0 && err != -ENODEV) { + dev_err(nvjpg->dev, "failed to attach to domain: %d\n", err); + return err; + } + + err = tegra_drm_register_client(tegra, drm); + if (err < 0) + goto detach; + + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent host1x device. 
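+ * The nvjpg platform device sits below the host1x controller (probe also borrows the parent's DMA mask), so sharing the parent's dma_parms keeps segment-size limits identical for both devices.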
+ */ + client->dev->dma_parms = client->host->dma_parms; + + return 0; + +detach: + host1x_client_iommu_detach(client); + + return err; +} + +static int nvjpg_exit(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->host); + struct tegra_drm *tegra = dev->dev_private; + struct nvjpg *nvjpg = to_nvjpg(drm); + int err; + + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + + err = tegra_drm_unregister_client(tegra, drm); + if (err < 0) + return err; + + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + + host1x_client_iommu_detach(client); + + if (client->group) { + dma_unmap_single(nvjpg->dev, nvjpg->falcon.firmware.phys, + nvjpg->falcon.firmware.size, DMA_TO_DEVICE); + tegra_drm_free(tegra, nvjpg->falcon.firmware.size, + nvjpg->falcon.firmware.virt, + nvjpg->falcon.firmware.iova); + } else { + dma_free_coherent(nvjpg->dev, nvjpg->falcon.firmware.size, + nvjpg->falcon.firmware.virt, + nvjpg->falcon.firmware.iova); + } + + return 0; +} + +static const struct host1x_client_ops nvjpg_client_ops = { + .init = nvjpg_init, + .exit = nvjpg_exit, +}; + +static int nvjpg_load_falcon_firmware(struct nvjpg *nvjpg) +{ + struct host1x_client *client = &nvjpg->client.base; + struct tegra_drm *tegra = nvjpg->client.drm; + dma_addr_t iova; + size_t size; + void *virt; + int err; + + if (nvjpg->falcon.firmware.virt) + return 0; + + err = falcon_read_firmware(&nvjpg->falcon, nvjpg->config->firmware); + if (err < 0) + return err; + + size = nvjpg->falcon.firmware.size; + + if (!client->group) { + virt = dma_alloc_coherent(nvjpg->dev, size, &iova, GFP_KERNEL); + if (!virt) + return -ENOMEM; + } else { + virt = tegra_drm_alloc(tegra, size, &iova); + if (IS_ERR(virt)) + return PTR_ERR(virt); + } + + nvjpg->falcon.firmware.virt = virt; + nvjpg->falcon.firmware.iova = iova; + + err = falcon_load_firmware(&nvjpg->falcon); + if (err < 0) + goto cleanup; + + /* + * In this case we have received an IOVA from the shared domain, so we + * need to make sure to get the physical address so that the DMA API + * knows what memory pages to flush the cache for. 
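+ * falcon_load_firmware() above filled the buffer through the CPU mapping, so the DMA_TO_DEVICE mapping also cleans those CPU writes out of the caches before the Falcon engine fetches the image.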
+ */ + if (client->group) { + dma_addr_t phys; + + phys = dma_map_single(nvjpg->dev, virt, size, DMA_TO_DEVICE); + + err = dma_mapping_error(nvjpg->dev, phys); + if (err < 0) + goto cleanup; + + nvjpg->falcon.firmware.phys = phys; + } + + return 0; + +cleanup: + if (!client->group) + dma_free_coherent(nvjpg->dev, size, virt, iova); + else + tegra_drm_free(tegra, size, virt, iova); + + return err; +} + +static __maybe_unused int nvjpg_runtime_resume(struct device *dev) +{ + struct nvjpg *nvjpg = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(nvjpg->clk); + if (err < 0) + return err; + + usleep_range(20, 30); + + err = nvjpg_load_falcon_firmware(nvjpg); + if (err < 0) + goto disable_clk; + + err = falcon_boot(&nvjpg->falcon); + if (err < 0) + goto disable_clk; + + return 0; + +disable_clk: + clk_disable_unprepare(nvjpg->clk); + return err; +} + +static __maybe_unused int nvjpg_runtime_suspend(struct device *dev) +{ + struct nvjpg *nvjpg = dev_get_drvdata(dev); + + clk_disable_unprepare(nvjpg->clk); + + return 0; +} + +static int nvjpg_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported) +{ + *supported = false; + + return 0; +} + +static const struct tegra_drm_client_ops nvjpg_ops = { + .get_streamid_offset = NULL, + .can_use_memory_ctx = nvjpg_can_use_memory_ctx, +}; + +#define NVIDIA_TEGRA_210_NVJPG_FIRMWARE "nvidia/tegra210/nvjpg.bin" + +static const struct nvjpg_config tegra210_nvjpg_config = { + .firmware = NVIDIA_TEGRA_210_NVJPG_FIRMWARE, + .version = 0x21, +}; + +static const struct of_device_id tegra_nvjpg_of_match[] = { + { .compatible = "nvidia,tegra210-nvjpg", .data = &tegra210_nvjpg_config }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_nvjpg_of_match); + +static int nvjpg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct nvjpg *nvjpg; + int err; + + /* inherit DMA mask from host1x parent */ + err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + return err; + } + + nvjpg = devm_kzalloc(dev, sizeof(*nvjpg), GFP_KERNEL); + if (!nvjpg) + return -ENOMEM; + + nvjpg->config = of_device_get_match_data(dev); + + nvjpg->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(nvjpg->regs)) + return PTR_ERR(nvjpg->regs); + + nvjpg->clk = devm_clk_get(dev, "nvjpg"); + if (IS_ERR(nvjpg->clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return PTR_ERR(nvjpg->clk); + } + + err = clk_set_rate(nvjpg->clk, ULONG_MAX); + if (err < 0) { + dev_err(&pdev->dev, "failed to set clock rate\n"); + return err; + } + + nvjpg->falcon.dev = dev; + nvjpg->falcon.regs = nvjpg->regs; + + err = falcon_init(&nvjpg->falcon); + if (err < 0) + return err; + + platform_set_drvdata(pdev, nvjpg); + + INIT_LIST_HEAD(&nvjpg->client.base.list); + nvjpg->client.base.ops = &nvjpg_client_ops; + nvjpg->client.base.dev = dev; + nvjpg->client.base.class = HOST1X_CLASS_NVJPG; + nvjpg->dev = dev; + + INIT_LIST_HEAD(&nvjpg->client.list); + nvjpg->client.version = nvjpg->config->version; + nvjpg->client.ops = &nvjpg_ops; + + err = host1x_client_register(&nvjpg->client.base); + if (err < 0) { + dev_err(dev, "failed to register host1x client: %d\n", err); + goto exit_falcon; + } + + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, 500); + devm_pm_runtime_enable(dev); + + return 0; + +exit_falcon: + falcon_exit(&nvjpg->falcon); + + return err; +} + +static void nvjpg_remove(struct platform_device *pdev) +{ + struct nvjpg *nvjpg = 
platform_get_drvdata(pdev); + + host1x_client_unregister(&nvjpg->client.base); + falcon_exit(&nvjpg->falcon); +} + +static const struct dev_pm_ops nvjpg_pm_ops = { + RUNTIME_PM_OPS(nvjpg_runtime_suspend, nvjpg_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) +}; + +struct platform_driver tegra_nvjpg_driver = { + .driver = { + .name = "tegra-nvjpg", + .of_match_table = tegra_nvjpg_of_match, + .pm = &nvjpg_pm_ops + }, + .probe = nvjpg_probe, + .remove = nvjpg_remove, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVJPG_FIRMWARE); +#endif diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 21f3dfdcc5c9..4023cb5998f1 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -24,6 +24,7 @@ #include <drm/drm_eld.h> #include <drm/drm_file.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "dc.h" @@ -1864,7 +1865,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor, { const u8 *ptr = data; unsigned long offset; - size_t i, j; + size_t i; u32 value; switch (ptr[0]) { @@ -1897,7 +1898,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor, * - subpack_low: bytes 0 - 3 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) */ - for (i = 3, j = 0; i < size; i += 7, j += 8) { + for (i = 3; i < size; i += 7) { size_t rem = size - i, num = min_t(size_t, rem, 4); value = tegra_sor_hdmi_subpack(&ptr[i], num); diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c index 5adab6b22916..d0b6a1fa6efa 100644 --- a/drivers/gpu/drm/tegra/uapi.c +++ b/drivers/gpu/drm/tegra/uapi.c @@ -114,9 +114,12 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_ if (err) goto put_channel; - if (supported) + if (supported) { + struct pid *pid = get_task_pid(current, PIDTYPE_TGID); context->memory_context = host1x_memory_context_alloc( - host, client->base.dev, get_task_pid(current, PIDTYPE_TGID)); + host, client->base.dev, pid); + put_pid(pid); + } if (IS_ERR(context->memory_context)) { if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) { diff --git a/drivers/gpu/drm/tests/.kunitconfig b/drivers/gpu/drm/tests/.kunitconfig index 6ec04b4c979d..5be8e71f45d5 100644 --- a/drivers/gpu/drm/tests/.kunitconfig +++ b/drivers/gpu/drm/tests/.kunitconfig @@ -1,3 +1,5 @@ CONFIG_KUNIT=y CONFIG_DRM=y +CONFIG_DRM_VKMS=y +CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_KUNIT_TEST=y diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index c0e952293ad0..87d5d5f9332a 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_plane_helper_test.o \ drm_probe_helper_test.o \ drm_rect_test.o \ - drm_sysfb_modeset_test.o + drm_sysfb_modeset_test.o \ + drm_fixp_test.o CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN) diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c index 7a0e523651f0..5f40b5343bd8 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/drm/tests/drm_buddy_test.c @@ -21,6 +21,110 @@ static inline u64 get_size(int order, u64 chunk_size) return (1 << order) * chunk_size; } +static void drm_test_buddy_fragmentation_performance(struct kunit *test) +{ + struct drm_buddy_block *block, *tmp; + int num_blocks, i, ret, count = 0; + LIST_HEAD(allocated_blocks); + unsigned long elapsed_ms; + LIST_HEAD(reverse_list); + 
LIST_HEAD(test_blocks); + LIST_HEAD(clear_list); + LIST_HEAD(dirty_list); + LIST_HEAD(free_list); + struct drm_buddy mm; + u64 mm_size = SZ_4G; + ktime_t start, end; + + /* + * Allocation under severe fragmentation + * + * Create severe fragmentation by allocating the entire 4 GiB address space + * as tiny 8 KiB blocks but forcing a 64 KiB alignment. The resulting pattern + * leaves many scattered holes. Split the allocations into two groups and + * return them with different flags to block coalescing, then repeatedly + * allocate and free 64 KiB blocks while timing the loop. This stresses how + * quickly the allocator can satisfy larger, aligned requests from a pool of + * highly fragmented space. + */ + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + "buddy_init failed\n"); + + num_blocks = mm_size / SZ_64K; + + start = ktime_get(); + /* Allocate with maximum fragmentation - 8K blocks with 64K alignment */ + for (i = 0; i < num_blocks; i++) + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, + &allocated_blocks, 0), + "buddy_alloc hit an error size=%u\n", SZ_8K); + + list_for_each_entry_safe(block, tmp, &allocated_blocks, link) { + if (count % 4 == 0 || count % 4 == 3) + list_move_tail(&block->link, &clear_list); + else + list_move_tail(&block->link, &dirty_list); + count++; + } + + /* Free with different flags to ensure no coalescing */ + drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED); + drm_buddy_free_list(&mm, &dirty_list, 0); + + for (i = 0; i < num_blocks; i++) + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K, + &test_blocks, 0), + "buddy_alloc hit an error size=%u\n", SZ_64K); + drm_buddy_free_list(&mm, &test_blocks, 0); + + end = ktime_get(); + elapsed_ms = ktime_to_ms(ktime_sub(end, start)); + + kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms); + + drm_buddy_fini(&mm); + + /* + * Reverse free order under fragmentation + * + * Construct a fragmented 4 GiB space by allocating every 8 KiB block with + * 64 KiB alignment, creating a dense scatter of small regions. Half of the + * blocks are selectively freed to form sparse gaps, while the remaining + * allocations are preserved, reordered in reverse, and released back with + * the cleared flag. This models a pathological reverse-ordered free pattern + * and measures how quickly the allocator can merge and reclaim space when + * deallocation occurs in the opposite order of allocation, exposing the + * cost difference between a linear freelist scan and an ordered tree lookup. 
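+ * For scale, num_blocks is mm_size / SZ_64K = 65536, so each allocation loop below runs 65536 times. Neither scenario asserts on the elapsed time; the kunit_info() figures are informational, for comparing allocator implementations.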
+ */ + ret = drm_buddy_init(&mm, mm_size, SZ_4K); + KUNIT_ASSERT_EQ(test, ret, 0); + + start = ktime_get(); + /* Allocate maximum fragmentation */ + for (i = 0; i < num_blocks; i++) + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, + &allocated_blocks, 0), + "buddy_alloc hit an error size=%u\n", SZ_8K); + + list_for_each_entry_safe(block, tmp, &allocated_blocks, link) { + if (count % 2 == 0) + list_move_tail(&block->link, &free_list); + count++; + } + drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED); + + list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link) + list_move(&block->link, &reverse_list); + drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED); + + end = ktime_get(); + elapsed_ms = ktime_to_ms(ktime_sub(end, start)); + + kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms); + + drm_buddy_fini(&mm); +} + static void drm_test_buddy_alloc_range_bias(struct kunit *test) { u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem; @@ -772,6 +876,7 @@ static struct kunit_case drm_buddy_tests[] = { KUNIT_CASE(drm_test_buddy_alloc_contiguous), KUNIT_CASE(drm_test_buddy_alloc_clear), KUNIT_CASE(drm_test_buddy_alloc_range_bias), + KUNIT_CASE(drm_test_buddy_fragmentation_performance), {} }; diff --git a/drivers/gpu/drm/tests/drm_fixp_test.c b/drivers/gpu/drm/tests/drm_fixp_test.c new file mode 100644 index 000000000000..dd77fdedb2a9 --- /dev/null +++ b/drivers/gpu/drm/tests/drm_fixp_test.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + */ + +#include <kunit/test.h> +#include <drm/drm_fixed.h> + +static void drm_test_sm2fixp(struct kunit *test) { + KUNIT_EXPECT_EQ(test, 0x7fffffffffffffffll, ((1ull << 63) - 1)); + + /* 1 */ + KUNIT_EXPECT_EQ(test, drm_int2fixp(1), drm_sm2fixp(1ull << DRM_FIXED_POINT)); + + /* -1 */ + KUNIT_EXPECT_EQ(test, drm_int2fixp(-1), + drm_sm2fixp((1ull << 63) | (1ull << DRM_FIXED_POINT))); + + /* 0.5 */ + KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(1, 2), + drm_sm2fixp(1ull << (DRM_FIXED_POINT - 1))); + + /* -0.5 */ + KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(-1, 2), + drm_sm2fixp((1ull << 63) | (1ull << (DRM_FIXED_POINT - 1)))); +} + +static void drm_test_int2fixp(struct kunit *test) { + /* 1 */ + KUNIT_EXPECT_EQ(test, 1ll << 32, drm_int2fixp(1)); + + /* -1 */ + KUNIT_EXPECT_EQ(test, -(1ll << 32), drm_int2fixp(-1)); + + /* 1 + (-1) = 0 */ + KUNIT_EXPECT_EQ(test, 0, drm_int2fixp(1) + drm_int2fixp(-1)); + + /* 1 / 2 */ + KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(1, 2)); + + /* -0.5 */ + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(-1, 2)); + + /* (-1 / 2) + 1 = 0.5 */ + KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(-1, 2) + drm_int2fixp(1)); + + /* (1 / 2) + (-1) = -0.5 */ + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) + drm_int2fixp(-1)); + + /* (1 / 2) - 1 = -0.5 */ + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) - drm_int2fixp(1)); +} + +static struct kunit_case drm_fixp_tests[] = { + KUNIT_CASE(drm_test_int2fixp), + KUNIT_CASE(drm_test_sm2fixp), + { } +}; + +static struct kunit_suite drm_fixp_test_suite = { + .name = "drm_fixp", + .test_cases = drm_fixp_tests, +}; + +kunit_test_suite(drm_fixp_test_suite); + +MODULE_AUTHOR("AMD"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Unit tests for drm_fixed.h"); diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c index 6174d0929020..aec9eccdeae9 100644 --- 
a/drivers/gpu/drm/tests/drm_mm_test.c +++ b/drivers/gpu/drm/tests/drm_mm_test.c @@ -14,6 +14,7 @@ #include <linux/ktime.h> #include <drm/drm_mm.h> +#include <drm/drm_print.h> #include "../lib/drm_random.h" diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index da89fd01c337..8f81eb560b9e 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_gem_dma_helper.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "tidss_crtc.h" @@ -94,8 +95,6 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, struct drm_display_mode *mode; enum drm_mode_status ok; - dev_dbg(ddev->dev, "%s\n", __func__); - if (!crtc_state->enable) return 0; @@ -103,7 +102,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, ok = dispc_vp_mode_valid(dispc, hw_videoport, mode); if (ok != MODE_OK) { - dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n", + drm_dbg(ddev, "%s: bad mode: %ux%u pclk %u kHz\n", __func__, mode->hdisplay, mode->vdisplay, mode->clock); return -EINVAL; } @@ -172,7 +171,7 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc, struct tidss_device *tidss = to_tidss(ddev); unsigned long flags; - dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n", + drm_dbg(ddev, "%s: %s is %sactive, %s modeset, event %p\n", __func__, crtc->name, crtc->state->active ? "" : "not ", drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need", crtc->state->event); @@ -244,11 +243,15 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc, dispc_vp_prepare(tidss->dispc, tcrtc->hw_videoport, crtc->state); - dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport, crtc->state); - spin_lock_irqsave(&ddev->event_lock, flags); + dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport); + if (crtc->state->event) { + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + + vblank->time = ktime_get(); + drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } @@ -328,8 +331,6 @@ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc) struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); - dev_dbg(ddev->dev, "%s\n", __func__); - tidss_runtime_get(tidss); tidss_irq_enable_vblank(crtc); @@ -342,29 +343,34 @@ static void tidss_crtc_disable_vblank(struct drm_crtc *crtc) struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); - dev_dbg(ddev->dev, "%s\n", __func__); - tidss_irq_disable_vblank(crtc); tidss_runtime_put(tidss); } +static void tidss_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct tidss_crtc_state *tstate = to_tidss_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(&tstate->base); + kfree(tstate); +} + static void tidss_crtc_reset(struct drm_crtc *crtc) { - struct tidss_crtc_state *tcrtc; + struct tidss_crtc_state *tstate; if (crtc->state) - __drm_atomic_helper_crtc_destroy_state(crtc->state); - - kfree(crtc->state); + tidss_crtc_destroy_state(crtc, crtc->state); - tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL); - if (!tcrtc) { + tstate = kzalloc(sizeof(*tstate), GFP_KERNEL); + if (!tstate) { crtc->state = NULL; return; } - __drm_atomic_helper_crtc_reset(crtc, &tcrtc->base); + __drm_atomic_helper_crtc_reset(crtc, &tstate->base); } static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc) @@ -404,7 +410,7 @@ static const struct drm_crtc_funcs tidss_crtc_funcs 
= { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = tidss_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .atomic_destroy_state = tidss_crtc_destroy_state, .enable_vblank = tidss_crtc_enable_vblank, .disable_vblank = tidss_crtc_disable_vblank, }; diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 7c8c15a5c39b..58d5eb033bdb 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -27,6 +27,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include "tidss_crtc.h" #include "tidss_dispc.h" @@ -57,12 +58,6 @@ static const u16 tidss_k2g_common_regs[DISPC_COMMON_REG_TABLE_LEN] = { }; const struct dispc_features dispc_k2g_feats = { - .min_pclk_khz = 4375, - - .max_pclk_khz = { - [DISPC_VP_DPI] = 150000, - }, - /* * XXX According TRM the RGB input buffer width up to 2560 should * work on 3 taps, but in practice it only works up to 1280. @@ -145,11 +140,6 @@ static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = { }; const struct dispc_features dispc_am65x_feats = { - .max_pclk_khz = { - [DISPC_VP_DPI] = 165000, - [DISPC_VP_OLDI_AM65X] = 165000, - }, - .scaling = { .in_width_max_5tap_rgb = 1280, .in_width_max_3tap_rgb = 2560, @@ -245,11 +235,6 @@ static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = { }; const struct dispc_features dispc_j721e_feats = { - .max_pclk_khz = { - [DISPC_VP_DPI] = 170000, - [DISPC_VP_INTERNAL] = 600000, - }, - .scaling = { .in_width_max_5tap_rgb = 2048, .in_width_max_3tap_rgb = 4096, @@ -316,11 +301,6 @@ const struct dispc_features dispc_j721e_feats = { }; const struct dispc_features dispc_am625_feats = { - .max_pclk_khz = { - [DISPC_VP_DPI] = 165000, - [DISPC_VP_INTERNAL] = 170000, - }, - .scaling = { .in_width_max_5tap_rgb = 1280, .in_width_max_3tap_rgb = 2560, @@ -377,15 +357,6 @@ const struct dispc_features dispc_am625_feats = { }; const struct dispc_features dispc_am62a7_feats = { - /* - * if the code reaches dispc_mode_valid with VP1, - * it should return MODE_BAD. 
- */ - .max_pclk_khz = { - [DISPC_VP_TIED_OFF] = 0, - [DISPC_VP_DPI] = 165000, - }, - .scaling = { .in_width_max_5tap_rgb = 1280, .in_width_max_3tap_rgb = 2560, @@ -442,10 +413,6 @@ const struct dispc_features dispc_am62a7_feats = { }; const struct dispc_features dispc_am62l_feats = { - .max_pclk_khz = { - [DISPC_VP_DPI] = 165000, - }, - .subrev = DISPC_AM62L, .common = "common", @@ -1051,20 +1018,22 @@ struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc, int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport, const struct drm_crtc_state *state) { + struct tidss_device *tidss = dispc->tidss; + struct drm_device *dev = &tidss->ddev; const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state); const struct dispc_bus_format *fmt; fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format, tstate->bus_flags); if (!fmt) { - dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n", + drm_dbg(dev, "%s: Unsupported bus format: %u\n", __func__, tstate->bus_format); return -EINVAL; } if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI_AM65X && fmt->is_oldi_fmt) { - dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n", + drm_dbg(dev, "%s: %s is not OLDI-port\n", __func__, dispc->feat->vp_name[hw_videoport]); return -EINVAL; } @@ -1161,6 +1130,9 @@ void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport, { const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state); const struct dispc_bus_format *fmt; + const struct drm_display_mode *mode = &state->adjusted_mode; + bool align, onoff, rf, ieo, ipc, ihs, ivs; + u32 hsw, hfp, hbp, vsw, vfp, vbp; fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format, tstate->bus_flags); @@ -1173,22 +1145,6 @@ void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport, dispc_enable_am65x_oldi(dispc, hw_videoport, fmt); } -} - -void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, - const struct drm_crtc_state *state) -{ - const struct drm_display_mode *mode = &state->adjusted_mode; - const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state); - bool align, onoff, rf, ieo, ipc, ihs, ivs; - const struct dispc_bus_format *fmt; - u32 hsw, hfp, hbp, vsw, vfp, vbp; - - fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format, - tstate->bus_flags); - - if (WARN_ON(!fmt)) - return; dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width); @@ -1244,7 +1200,10 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, mode->crtc_hdisplay - 1) | FIELD_PREP(DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK, mode->crtc_vdisplay - 1)); +} +void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport) +{ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, DISPC_VP_CONTROL_ENABLE_MASK); } @@ -1331,33 +1290,61 @@ static void dispc_vp_set_default_color(struct dispc_device *dispc, DISPC_OVR_DEFAULT_COLOR2, (v >> 32) & 0xffff); } +/* + * Calculate the percentage difference between the requested pixel clock rate + * and the effective rate resulting from calculating the clock divider value. + */ +unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate) +{ + int r = rate / 100, rr = real_rate / 100; + + return (unsigned int)(abs(((rr - r) * 100) / r)); +} + +static int check_pixel_clock(struct dispc_device *dispc, u32 hw_videoport, + unsigned long clock) +{ + unsigned long round_clock; + + /* + * For VP's with external clocking, clock operations must be + * delegated to respective driver, so we skip the check here. 
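The 5% tolerance is easiest to follow with concrete numbers. dispc_pclk_diff() divides both rates by 100 and does integer math, so a 165 MHz request that the clock framework can only round to 150 MHz evaluates to 9 and the mode is rejected, while a rounding error of a few hundred kHz on a 148.5 MHz clock evaluates to 0 and passes. A self-contained restatement of the same arithmetic (illustrative, not the driver function):

#include <linux/kernel.h>	/* abs() */

/* Same integer math as dispc_pclk_diff(), restated for illustration. */
static inline unsigned int pclk_diff_percent(unsigned long rate,
					     unsigned long real_rate)
{
	int r = rate / 100, rr = real_rate / 100;

	return (unsigned int)abs(((rr - r) * 100) / r);
}

/*
 * pclk_diff_percent(165000000, 150000000) == 9  -> check_pixel_clock() fails
 * pclk_diff_percent(148500000, 148351648) == 0  -> mode accepted
 */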
+ */ + if (dispc->tidss->is_ext_vp_clk[hw_videoport]) + return 0; + + round_clock = clk_round_rate(dispc->vp_clk[hw_videoport], clock); + /* + * To keep the check consistent with dispc_vp_set_clk_rate(), we + * use the same 5% check here. + */ + if (dispc_pclk_diff(clock, round_clock) > 5) + return -EINVAL; + + return 0; +} + enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc, u32 hw_videoport, const struct drm_display_mode *mode) { u32 hsw, hfp, hbp, vsw, vfp, vbp; enum dispc_vp_bus_type bus_type; - int max_pclk; bus_type = dispc->feat->vp_bus_type[hw_videoport]; - max_pclk = dispc->feat->max_pclk_khz[bus_type]; - - if (WARN_ON(max_pclk == 0)) + if (WARN_ON(bus_type == DISPC_VP_TIED_OFF)) return MODE_BAD; - if (mode->clock < dispc->feat->min_pclk_khz) - return MODE_CLOCK_LOW; - - if (mode->clock > max_pclk) - return MODE_CLOCK_HIGH; - if (mode->hdisplay > 4096) return MODE_BAD; if (mode->vdisplay > 4096) return MODE_BAD; + if (check_pixel_clock(dispc, hw_videoport, mode->clock * 1000)) + return MODE_CLOCK_RANGE; + /* TODO: add interlace support */ if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; @@ -1421,17 +1408,6 @@ void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport) clk_disable_unprepare(dispc->vp_clk[hw_videoport]); } -/* - * Calculate the percentage difference between the requested pixel clock rate - * and the effective rate resulting from calculating the clock divider value. - */ -unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate) -{ - int r = rate / 100, rr = real_rate / 100; - - return (unsigned int)(abs(((rr - r) * 100) / r)); -} - int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport, unsigned long rate) { @@ -2849,8 +2825,6 @@ int dispc_runtime_resume(struct dispc_device *dispc) void dispc_remove(struct tidss_device *tidss) { - dev_dbg(tidss->dev, "%s\n", __func__); - tidss->dispc = NULL; } @@ -2992,8 +2966,6 @@ int dispc_init(struct tidss_device *tidss) unsigned int i, num_fourccs; int r = 0; - dev_dbg(dev, "%s\n", __func__); - feat = tidss->feat; if (feat->subrev != DISPC_K2G) { diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h index 60c1b400eb89..739d211d0018 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.h +++ b/drivers/gpu/drm/tidss/tidss_dispc.h @@ -77,9 +77,6 @@ enum dispc_dss_subrevision { }; struct dispc_features { - int min_pclk_khz; - int max_pclk_khz[DISPC_VP_MAX_BUS_TYPE]; - struct dispc_features_scaling scaling; enum dispc_dss_subrevision subrev; @@ -119,8 +116,7 @@ void dispc_ovr_enable_layer(struct dispc_device *dispc, void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport, const struct drm_crtc_state *state); -void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, - const struct drm_crtc_state *state); +void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport); void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport); void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport); bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport); diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 27d9a8fd541f..1c8cc18bc53c 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -33,8 +33,6 @@ int tidss_runtime_get(struct tidss_device *tidss) { int r; - dev_dbg(tidss->dev, "%s\n", __func__); - r = pm_runtime_resume_and_get(tidss->dev); WARN_ON(r < 0); return r; @@ -44,8 +42,6 @@ void tidss_runtime_put(struct 
tidss_device *tidss) { int r; - dev_dbg(tidss->dev, "%s\n", __func__); - pm_runtime_mark_last_busy(tidss->dev); r = pm_runtime_put_autosuspend(tidss->dev); @@ -56,8 +52,6 @@ static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev) { struct tidss_device *tidss = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - return dispc_runtime_suspend(tidss->dispc); } @@ -66,8 +60,6 @@ static int __maybe_unused tidss_pm_runtime_resume(struct device *dev) struct tidss_device *tidss = dev_get_drvdata(dev); int r; - dev_dbg(dev, "%s\n", __func__); - r = dispc_runtime_resume(tidss->dispc); if (r) return r; @@ -79,8 +71,6 @@ static int __maybe_unused tidss_suspend(struct device *dev) { struct tidss_device *tidss = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - return drm_mode_config_helper_suspend(&tidss->ddev); } @@ -88,8 +78,6 @@ static int __maybe_unused tidss_resume(struct device *dev) { struct tidss_device *tidss = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - return drm_mode_config_helper_resume(&tidss->ddev); } @@ -127,8 +115,6 @@ static int tidss_probe(struct platform_device *pdev) int ret; int irq; - dev_dbg(dev, "%s\n", __func__); - tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver, struct tidss_device, ddev); if (IS_ERR(tidss)) @@ -228,8 +214,6 @@ static void tidss_remove(struct platform_device *pdev) struct tidss_device *tidss = platform_get_drvdata(pdev); struct drm_device *ddev = &tidss->ddev; - dev_dbg(dev, "%s\n", __func__); - drm_dev_unregister(ddev); drm_atomic_helper_shutdown(ddev); diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h index 84454a4855d1..e1c1f41d8b4b 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.h +++ b/drivers/gpu/drm/tidss/tidss_drv.h @@ -24,6 +24,8 @@ struct tidss_device { const struct dispc_features *feat; struct dispc_device *dispc; + bool is_ext_vp_clk[TIDSS_MAX_PORTS]; + unsigned int num_crtcs; struct drm_crtc *crtcs[TIDSS_MAX_PORTS]; diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index c34eb90cddbe..86eb5d97410b 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -24,8 +24,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) struct drm_device *ddev = old_state->dev; struct tidss_device *tidss = to_tidss(ddev); - dev_dbg(ddev->dev, "%s\n", __func__); - tidss_runtime_get(tidss); drm_atomic_helper_commit_modeset_disables(ddev, old_state); @@ -245,8 +243,6 @@ int tidss_modeset_init(struct tidss_device *tidss) struct drm_device *ddev = &tidss->ddev; int ret; - dev_dbg(tidss->dev, "%s\n", __func__); - ret = drmm_mode_config_init(ddev); if (ret) return ret; diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c index 7688251beba2..17c535bfa057 100644 --- a/drivers/gpu/drm/tidss/tidss_oldi.c +++ b/drivers/gpu/drm/tidss/tidss_oldi.c @@ -309,6 +309,25 @@ static u32 *tidss_oldi_atomic_get_input_bus_fmts(struct drm_bridge *bridge, return input_fmts; } +static enum drm_mode_status +tidss_oldi_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge); + unsigned long round_clock; + + round_clock = clk_round_rate(oldi->serial, mode->clock * 7 * 1000); + /* + * To keep the check consistent with dispc_vp_set_clk_rate(), + * we use the same 5% check here. 
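The OLDI bridge applies the same 5% rule, but to the serializer clock rather than the pixel clock: an OLDI/LVDS link serializes seven bits per lane per pixel clock, hence the mode->clock * 7 * 1000 handed to clk_round_rate(). A small sketch of that relationship (the helper name is made up for illustration):

/* OLDI serializes 7 bits per lane per pixel clock: serial = 7 * pclk. */
static inline unsigned long oldi_serial_rate_hz(int mode_clock_khz)
{
	return (unsigned long)mode_clock_khz * 7 * 1000;
}

/* 1080p60 example: oldi_serial_rate_hz(148500) == 1039500000, i.e. 1.0395 GHz. */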
+ */ + if (dispc_pclk_diff(mode->clock * 7 * 1000, round_clock) > 5) + return -EINVAL; + + return 0; +} + static const struct drm_bridge_funcs tidss_oldi_bridge_funcs = { .attach = tidss_oldi_bridge_attach, .atomic_pre_enable = tidss_oldi_atomic_pre_enable, @@ -317,6 +336,7 @@ static const struct drm_bridge_funcs tidss_oldi_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, + .mode_valid = tidss_oldi_mode_valid, }; static int get_oldi_mode(struct device_node *oldi_tx, int *companion_instance) @@ -430,6 +450,7 @@ void tidss_oldi_deinit(struct tidss_device *tidss) for (int i = 0; i < tidss->num_oldis; i++) { if (tidss->oldis[i]) { drm_bridge_remove(&tidss->oldis[i]->bridge); + tidss->is_ext_vp_clk[tidss->oldis[i]->parent_vp] = false; tidss->oldis[i] = NULL; } } @@ -580,6 +601,7 @@ int tidss_oldi_init(struct tidss_device *tidss) oldi->bridge.timings = &default_tidss_oldi_timings; tidss->oldis[tidss->num_oldis++] = oldi; + tidss->is_ext_vp_clk[oldi->parent_vp] = true; oldi->tidss = tidss; drm_bridge_add(&oldi->bridge); diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c index 142ae81951a0..bd10bc1b9961 100644 --- a/drivers/gpu/drm/tidss/tidss_plane.c +++ b/drivers/gpu/drm/tidss/tidss_plane.c @@ -42,8 +42,6 @@ static int tidss_plane_atomic_check(struct drm_plane *plane, u32 hw_videoport; int ret; - dev_dbg(ddev->dev, "%s\n", __func__); - if (!new_plane_state->crtc) { /* * The visible field is not reset by the DRM core but only @@ -124,8 +122,6 @@ static void tidss_plane_atomic_update(struct drm_plane *plane, plane); u32 hw_videoport; - dev_dbg(ddev->dev, "%s\n", __func__); - if (!new_state->visible) { dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false); return; @@ -143,8 +139,6 @@ static void tidss_plane_atomic_enable(struct drm_plane *plane, struct tidss_device *tidss = to_tidss(ddev); struct tidss_plane *tplane = to_tidss_plane(plane); - dev_dbg(ddev->dev, "%s\n", __func__); - dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true); } @@ -155,8 +149,6 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane, struct tidss_device *tidss = to_tidss(ddev); struct tidss_plane *tplane = to_tidss_plane(plane); - dev_dbg(ddev->dev, "%s\n", __func__); - dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false); } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index b5f60b2b2d0e..5718d9d83a49 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -676,14 +676,7 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc, if (!crtc_state->active) return 0; - if (state->planes[0].ptr != crtc->primary || - state->planes[0].state == NULL || - state->planes[0].state->crtc != crtc) { - dev_dbg(crtc->dev->dev, "CRTC primary plane must be present"); - return -EINVAL; - } - - return 0; + return drm_atomic_helper_check_crtc_primary_plane(crtc_state); } static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c index cf77a8ce7398..aa72ca679598 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c @@ -42,8 +42,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_state->crtc); + crtc_state = 
drm_atomic_get_new_crtc_state(state, new_state->crtc); /* we should have a crtc state if the plane is attached to a crtc */ if (WARN_ON(!crtc_state)) return 0; diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 7d9e85e932d7..f0e72d4b6a47 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -85,6 +85,7 @@ config DRM_PANEL_MIPI_DBI config DRM_PIXPAPER tristate "DRM support for PIXPAPER display panels" depends on DRM && SPI + depends on MMU select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index d2d5e9f1269f..222e4ae1abbd 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -21,7 +21,10 @@ #include <drm/drm_module.h> #include <drm/drm_panic.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #include <video/vga.h> @@ -526,6 +529,7 @@ static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct bochs_device *bochs = to_bochs_device(crtc->dev); bochs_hw_blank(bochs, false); + drm_crtc_vblank_on(crtc); } static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc, @@ -533,12 +537,14 @@ static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc, { struct bochs_device *bochs = to_bochs_device(crtc->dev); + drm_crtc_vblank_off(crtc); bochs_hw_blank(bochs, true); } static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = { .mode_set_nofb = bochs_crtc_helper_mode_set_nofb, .atomic_check = bochs_crtc_helper_atomic_check, + .atomic_flush = drm_crtc_vblank_atomic_flush, .atomic_enable = bochs_crtc_helper_atomic_enable, .atomic_disable = bochs_crtc_helper_atomic_disable, }; @@ -550,6 +556,7 @@ static const struct drm_crtc_funcs bochs_crtc_funcs = { .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static const struct drm_encoder_funcs bochs_encoder_funcs = { @@ -670,6 +677,10 @@ static int bochs_kms_init(struct bochs_device *bochs) drm_connector_attach_edid_property(connector); drm_connector_attach_encoder(connector, encoder); + ret = drm_vblank_init(dev, 1); + if (ret) + return ret; + drm_mode_config_reset(dev); return 0; diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c index 97a93adc5669..9ba0eab489bb 100644 --- a/drivers/gpu/drm/tiny/cirrus-qemu.c +++ b/drivers/gpu/drm/tiny/cirrus-qemu.c @@ -44,7 +44,10 @@ #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_module.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #define DRIVER_NAME "cirrus-qemu" #define DRIVER_DESC "qemu cirrus vga" @@ -404,11 +407,15 @@ static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc, #endif drm_dev_exit(idx); + + drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs cirrus_crtc_helper_funcs = { .atomic_check = cirrus_crtc_helper_atomic_check, + .atomic_flush = drm_crtc_vblank_atomic_flush, .atomic_enable = cirrus_crtc_helper_atomic_enable, + .atomic_disable = drm_crtc_vblank_atomic_disable, }; static const struct drm_crtc_funcs cirrus_crtc_funcs = { @@ -418,6 +425,7 @@ static const struct drm_crtc_funcs cirrus_crtc_funcs = { .page_flip = drm_atomic_helper_page_flip, 
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static const struct drm_encoder_funcs cirrus_encoder_funcs = { @@ -493,6 +501,10 @@ static int cirrus_pipe_init(struct cirrus_device *cirrus) if (ret) return ret; + ret = drm_vblank_init(dev, 1); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index fb0004166f4a..d73dfebb4353 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -25,6 +25,7 @@ #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index df263818f45f..9f26aaca0bfa 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -25,6 +25,7 @@ #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #include <video/mipi_display.h> #define HX8357D_SETOSC 0xb0 diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c index 62cadf5e033d..7c154c008344 100644 --- a/drivers/gpu/drm/tiny/ili9163.c +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -15,6 +15,7 @@ #include <drm/drm_gem_dma_helper.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #include <video/mipi_display.h> diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 6de44ff69b51..d32538b1eb09 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -29,6 +29,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> +#include <drm/drm_print.h> #include <drm/drm_rect.h> #define ILI9225_DRIVER_READ_CODE 0x00 diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index e55029433509..2ab750cba505 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -24,6 +24,7 @@ #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #include <video/mipi_display.h> #define ILI9341_FRMCTR1 0xb1 diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 093661c771a0..1e411a0f4567 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -23,6 +23,7 @@ #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #define ILI9486_ITFCTR1 0xb0 #define ILI9486_PWCTRL1 0xc2 diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index b6b4664908ae..a063eff77624 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -22,6 +22,7 @@ #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #include <video/mipi_display.h> #define ILI9341_FRMCTR1 0xb1 diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c index 23914a9f7fd3..82dfa169f762 100644 --- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c +++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c @@ -25,6 +25,7 @@ #include <drm/drm_mipi_dbi.h> #include <drm/drm_modes.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_print.h> #include <video/mipi_display.h> diff 
--git a/drivers/gpu/drm/tiny/pixpaper.c b/drivers/gpu/drm/tiny/pixpaper.c index 32598fb2fee7..df3ec42edd57 100644 --- a/drivers/gpu/drm/tiny/pixpaper.c +++ b/drivers/gpu/drm/tiny/pixpaper.c @@ -17,6 +17,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> /* diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 4824f863fdba..c8270591afc7 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -36,6 +36,7 @@ #include <drm/drm_managed.h> #include <drm/drm_modes.h> #include <drm/drm_rect.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c index 6c77550c51af..d468f8322072 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c @@ -251,7 +251,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; @@ -290,7 +290,7 @@ static void ttm_bo_unreserve_pinned(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; @@ -342,7 +342,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test) resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, resv); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; @@ -379,7 +379,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test) dma_resv_fini(resv); } -static void ttm_bo_put_basic(struct kunit *test) +static void ttm_bo_fini_basic(struct kunit *test) { struct ttm_test_devices *priv = test->priv; struct ttm_buffer_object *bo; @@ -394,7 +394,7 @@ static void ttm_bo_put_basic(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; @@ -410,7 +410,7 @@ static void ttm_bo_put_basic(struct kunit *test) dma_resv_unlock(bo->base.resv); KUNIT_EXPECT_EQ(test, err, 0); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static const char *mock_name(struct dma_fence *f) @@ -423,7 +423,7 @@ static const struct dma_fence_ops mock_fence_ops = { .get_timeline_name = mock_name, }; -static void ttm_bo_put_shared_resv(struct kunit *test) +static void ttm_bo_fini_shared_resv(struct kunit *test) { struct ttm_test_devices *priv = test->priv; struct ttm_buffer_object *bo; @@ -437,7 +437,7 @@ static void ttm_bo_put_shared_resv(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; @@ -463,7 +463,7 @@ static void ttm_bo_put_shared_resv(struct kunit 
*test) bo->type = ttm_bo_type_device; bo->base.resv = external_resv; - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_pin_basic(struct kunit *test) @@ -477,7 +477,7 @@ static void ttm_bo_pin_basic(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; @@ -512,7 +512,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; @@ -563,7 +563,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; @@ -616,8 +616,8 @@ static struct kunit_case ttm_bo_test_cases[] = { KUNIT_CASE(ttm_bo_unreserve_basic), KUNIT_CASE(ttm_bo_unreserve_pinned), KUNIT_CASE(ttm_bo_unreserve_bulk), - KUNIT_CASE(ttm_bo_put_basic), - KUNIT_CASE(ttm_bo_put_shared_resv), + KUNIT_CASE(ttm_bo_fini_basic), + KUNIT_CASE(ttm_bo_fini_shared_resv), KUNIT_CASE(ttm_bo_pin_basic), KUNIT_CASE(ttm_bo_pin_unpin_resource), KUNIT_CASE(ttm_bo_multiple_pin_one_unpin), diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c index 1bcc67977f48..2eda87882e65 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c @@ -144,7 +144,7 @@ static void ttm_bo_init_reserved_sys_man(struct kunit *test) drm_mm_node_allocated(&bo->base.vma_node.vm_node)); ttm_resource_free(bo, &bo->resource); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_init_reserved_mock_man(struct kunit *test) @@ -186,7 +186,7 @@ static void ttm_bo_init_reserved_mock_man(struct kunit *test) drm_mm_node_allocated(&bo->base.vma_node.vm_node)); ttm_resource_free(bo, &bo->resource); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, mem_type); } @@ -221,7 +221,7 @@ static void ttm_bo_init_reserved_resv(struct kunit *test) KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv); ttm_resource_free(bo, &bo->resource); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_validate_basic(struct kunit *test) @@ -265,7 +265,7 @@ static void ttm_bo_validate_basic(struct kunit *test) KUNIT_EXPECT_EQ(test, bo->resource->placement, DRM_BUDDY_TOPDOWN_ALLOCATION); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, snd_mem); } @@ -292,7 +292,7 @@ static void ttm_bo_validate_invalid_placement(struct kunit *test) KUNIT_EXPECT_EQ(test, err, -ENOMEM); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_validate_failed_alloc(struct kunit *test) @@ -321,7 +321,7 @@ static void ttm_bo_validate_failed_alloc(struct kunit *test) KUNIT_EXPECT_EQ(test, err, -ENOMEM); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_bad_manager_fini(priv->ttm_dev, mem_type); } @@ -353,7 +353,7 @@ static void ttm_bo_validate_pinned(struct kunit *test) ttm_bo_unpin(bo); dma_resv_unlock(bo->base.resv); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static const struct ttm_bo_validate_test_case 
ttm_mem_type_cases[] = { @@ -403,7 +403,7 @@ static void ttm_bo_validate_same_placement(struct kunit *test) KUNIT_EXPECT_EQ(test, err, 0); KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0); - ttm_bo_put(bo); + ttm_bo_fini(bo); if (params->mem_type != TTM_PL_SYSTEM) ttm_mock_manager_fini(priv->ttm_dev, params->mem_type); @@ -452,7 +452,7 @@ static void ttm_bo_validate_busy_placement(struct kunit *test) KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem); KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority])); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_bad_manager_fini(priv->ttm_dev, fst_mem); ttm_mock_manager_fini(priv->ttm_dev, snd_mem); } @@ -495,7 +495,7 @@ static void ttm_bo_validate_multihop(struct kunit *test) KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2); KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, fst_mem); ttm_mock_manager_fini(priv->ttm_dev, tmp_mem); @@ -567,7 +567,7 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test) KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC); } - ttm_bo_put(bo); + ttm_bo_fini(bo); } static int threaded_dma_resv_signal(void *arg) @@ -635,7 +635,7 @@ static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test) /* Make sure we have an idle object at this point */ dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_validate_move_fence_signaled(struct kunit *test) @@ -652,7 +652,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test) int err; man = ttm_manager_type(priv->ttm_dev, mem_type); - man->move = dma_fence_get_stub(); + man->eviction_fences[0] = dma_fence_get_stub(); bo = ttm_bo_kunit_init(test, test->priv, size, NULL); bo->type = bo_type; @@ -668,8 +668,8 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test) KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type); KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size); - ttm_bo_put(bo); - dma_fence_put(man->move); + ttm_bo_fini(bo); + dma_fence_put(man->eviction_fences[0]); } static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = { @@ -733,9 +733,9 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test) spin_lock_init(&fence_lock); man = ttm_manager_type(priv->ttm_dev, fst_mem); - man->move = alloc_mock_fence(test); + man->eviction_fences[0] = alloc_mock_fence(test); - task = kthread_create(threaded_fence_signal, man->move, "move-fence-signal"); + task = kthread_create(threaded_fence_signal, man->eviction_fences[0], "move-fence-signal"); if (IS_ERR(task)) KUNIT_FAIL(test, "Couldn't create move fence signal task\n"); @@ -743,7 +743,8 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test) err = ttm_bo_validate(bo, placement_val, &ctx_val); dma_resv_unlock(bo->base.resv); - dma_fence_wait_timeout(man->move, false, MAX_SCHEDULE_TIMEOUT); + dma_fence_wait_timeout(man->eviction_fences[0], false, MAX_SCHEDULE_TIMEOUT); + man->eviction_fences[0] = NULL; KUNIT_EXPECT_EQ(test, err, 0); KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size); @@ -753,7 +754,7 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test) else KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, fst_mem); ttm_mock_manager_fini(priv->ttm_dev, snd_mem); } @@ -807,8 +808,8 @@ static void ttm_bo_validate_happy_evict(struct kunit *test) 
KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type); for (i = 0; i < bo_no; i++) - ttm_bo_put(&bos[i]); - ttm_bo_put(bo_val); + ttm_bo_fini(&bos[i]); + ttm_bo_fini(bo_val); ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); @@ -852,12 +853,12 @@ static void ttm_bo_validate_all_pinned_evict(struct kunit *test) KUNIT_EXPECT_EQ(test, err, -ENOMEM); - ttm_bo_put(bo_small); + ttm_bo_fini(bo_small); ttm_bo_reserve(bo_big, false, false, NULL); ttm_bo_unpin(bo_big); dma_resv_unlock(bo_big->base.resv); - ttm_bo_put(bo_big); + ttm_bo_fini(bo_big); ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); @@ -916,13 +917,13 @@ static void ttm_bo_validate_allowed_only_evict(struct kunit *test) KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict); KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE); - ttm_bo_put(bo); - ttm_bo_put(bo_evictable); + ttm_bo_fini(bo); + ttm_bo_fini(bo_evictable); ttm_bo_reserve(bo_pinned, false, false, NULL); ttm_bo_unpin(bo_pinned); dma_resv_unlock(bo_pinned->base.resv); - ttm_bo_put(bo_pinned); + ttm_bo_fini(bo_pinned); ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); @@ -973,8 +974,8 @@ static void ttm_bo_validate_deleted_evict(struct kunit *test) KUNIT_EXPECT_NULL(test, bo_big->ttm); KUNIT_EXPECT_NULL(test, bo_big->resource); - ttm_bo_put(bo_small); - ttm_bo_put(bo_big); + ttm_bo_fini(bo_small); + ttm_bo_fini(bo_big); ttm_mock_manager_fini(priv->ttm_dev, mem_type); } @@ -995,7 +996,7 @@ static void ttm_bo_validate_busy_domain_evict(struct kunit *test) */ ttm_device_fini(priv->ttm_dev); - err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev, false, false); + err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev); KUNIT_ASSERT_EQ(test, err, 0); ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE); @@ -1025,8 +1026,8 @@ static void ttm_bo_validate_busy_domain_evict(struct kunit *test) KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type); KUNIT_EXPECT_NULL(test, bo_val->resource); - ttm_bo_put(bo_init); - ttm_bo_put(bo_val); + ttm_bo_fini(bo_init); + ttm_bo_fini(bo_val); ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict); @@ -1070,8 +1071,8 @@ static void ttm_bo_validate_evict_gutting(struct kunit *test) KUNIT_ASSERT_NULL(test, bo_evict->resource); KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC); - ttm_bo_put(bo_evict); - ttm_bo_put(bo); + ttm_bo_fini(bo_evict); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, mem_type); } @@ -1128,9 +1129,9 @@ static void ttm_bo_validate_recrusive_evict(struct kunit *test) ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict); - ttm_bo_put(bo_val); - ttm_bo_put(bo_tt); - ttm_bo_put(bo_mock); + ttm_bo_fini(bo_val); + ttm_bo_fini(bo_tt); + ttm_bo_fini(bo_mock); } static struct kunit_case ttm_bo_validate_test_cases[] = { diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c index 1621903818e5..2d55ad34fe48 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c @@ -7,11 +7,11 @@ #include <drm/ttm/ttm_placement.h> #include "ttm_kunit_helpers.h" +#include "../ttm_pool_internal.h" struct ttm_device_test_case { const char *description; - bool use_dma_alloc; - bool use_dma32; + unsigned int alloc_flags; bool 
pools_init_expected; }; @@ -25,7 +25,7 @@ static void ttm_device_init_basic(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs); @@ -55,7 +55,7 @@ static void ttm_device_init_multiple(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, ttm_devs); for (i = 0; i < num_dev; i++) { - err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false); + err = ttm_device_kunit_init(priv, &ttm_devs[i], 0); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping, @@ -81,7 +81,7 @@ static void ttm_device_fini_basic(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM); @@ -109,7 +109,7 @@ static void ttm_device_init_no_vma_man(struct kunit *test) vma_man = drm->vma_offset_manager; drm->vma_offset_manager = NULL; - err = ttm_device_kunit_init(priv, ttm_dev, false, false); + err = ttm_device_kunit_init(priv, ttm_dev, 0); KUNIT_EXPECT_EQ(test, err, -EINVAL); /* Bring the manager back for a graceful cleanup */ @@ -119,26 +119,22 @@ static void ttm_device_init_no_vma_man(struct kunit *test) static const struct ttm_device_test_case ttm_device_cases[] = { { .description = "No DMA allocations, no DMA32 required", - .use_dma_alloc = false, - .use_dma32 = false, .pools_init_expected = false, }, { .description = "DMA allocations, DMA32 required", - .use_dma_alloc = true, - .use_dma32 = true, + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC | + TTM_ALLOCATION_POOL_USE_DMA32, .pools_init_expected = true, }, { .description = "No DMA allocations, DMA32 required", - .use_dma_alloc = false, - .use_dma32 = true, + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA32, .pools_init_expected = false, }, { .description = "DMA allocations, no DMA32 required", - .use_dma_alloc = true, - .use_dma32 = false, + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC, .pools_init_expected = true, }, }; @@ -162,16 +158,13 @@ static void ttm_device_init_pools(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(priv, ttm_dev, - params->use_dma_alloc, - params->use_dma32); + err = ttm_device_kunit_init(priv, ttm_dev, params->alloc_flags); KUNIT_ASSERT_EQ(test, err, 0); pool = &ttm_dev->pool; KUNIT_ASSERT_NOT_NULL(test, pool); KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev); - KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc); - KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32); + KUNIT_EXPECT_EQ(test, pool->alloc_flags, params->alloc_flags); if (params->pools_init_expected) { for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { @@ -181,7 +174,7 @@ static void ttm_device_init_pools(struct kunit *test) KUNIT_EXPECT_EQ(test, pt.caching, i); KUNIT_EXPECT_EQ(test, pt.order, j); - if (params->use_dma_alloc) + if (ttm_pool_uses_dma_alloc(pool)) KUNIT_ASSERT_FALSE(test, list_empty(&pt.pages)); } diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c index 7aaf0d1395ff..7b533e4e1e04 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c +++ 
b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c @@ -117,8 +117,7 @@ static void bad_evict_flags(struct ttm_buffer_object *bo, static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv, struct ttm_device *ttm, - bool use_dma_alloc, - bool use_dma32, + unsigned int alloc_flags, struct ttm_device_funcs *funcs) { struct drm_device *drm = priv->drm; @@ -127,7 +126,7 @@ static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv, err = ttm_device_init(ttm, funcs, drm->dev, drm->anon_inode->i_mapping, drm->vma_offset_manager, - use_dma_alloc, use_dma32); + alloc_flags); return err; } @@ -143,11 +142,10 @@ EXPORT_SYMBOL_GPL(ttm_dev_funcs); int ttm_device_kunit_init(struct ttm_test_devices *priv, struct ttm_device *ttm, - bool use_dma_alloc, - bool use_dma32) + unsigned int alloc_flags) { - return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc, - use_dma32, &ttm_dev_funcs); + return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags, + &ttm_dev_funcs); } EXPORT_SYMBOL_GPL(ttm_device_kunit_init); @@ -161,12 +159,10 @@ struct ttm_device_funcs ttm_dev_funcs_bad_evict = { EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict); int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv, - struct ttm_device *ttm, - bool use_dma_alloc, - bool use_dma32) + struct ttm_device *ttm) { - return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc, - use_dma32, &ttm_dev_funcs_bad_evict); + return ttm_device_kunit_init_with_funcs(priv, ttm, 0, + &ttm_dev_funcs_bad_evict); } EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict); @@ -252,7 +248,7 @@ struct ttm_test_devices *ttm_test_devices_all(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); - err = ttm_device_kunit_init(devs, ttm_dev, false, false); + err = ttm_device_kunit_init(devs, ttm_dev, 0); KUNIT_ASSERT_EQ(test, err, 0); devs->ttm_dev = ttm_dev; diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h index c7da23232ffa..f8402b979d05 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h +++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h @@ -28,12 +28,9 @@ struct ttm_test_devices { /* Building blocks for test-specific init functions */ int ttm_device_kunit_init(struct ttm_test_devices *priv, struct ttm_device *ttm, - bool use_dma_alloc, - bool use_dma32); + unsigned int alloc_flags); int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv, - struct ttm_device *ttm, - bool use_dma_alloc, - bool use_dma32); + struct ttm_device *ttm); struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test, struct ttm_test_devices *devs, size_t size, diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c index d7eb6471f2ed..dd395229e388 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c +++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c @@ -4,6 +4,7 @@ */ #include <linux/export.h> +#include <linux/module.h> #include <drm/ttm/ttm_resource.h> #include <drm/ttm/ttm_device.h> diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c index 8ade53371f72..11c92bd75779 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c @@ -8,11 +8,12 @@ #include <drm/ttm/ttm_pool.h> #include "ttm_kunit_helpers.h" +#include "../ttm_pool_internal.h" struct ttm_pool_test_case { const char *description; unsigned int order; - bool use_dma_alloc; + unsigned int alloc_flags; }; 
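Throughout these tests the two former booleans (use_dma_alloc, use_dma32) collapse into a single alloc_flags bitmask, queried via ttm_pool_uses_dma_alloc() and ttm_pool_uses_dma32() from the new ttm_pool_internal.h. Those helpers are presumably plain flag tests on pool->alloc_flags, along these lines (a sketch of the assumed shape, not the actual header contents):

static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
{
	return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
}

static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
{
	return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32;
}

/*
 * Call-site change, old vs. new:
 *   ttm_pool_init(pool, dev, nid, true, false);
 *   ttm_pool_init(pool, dev, nid, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
 */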
struct ttm_pool_test_priv { @@ -86,7 +87,7 @@ static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test, pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC); err = ttm_pool_alloc(pool, tt, &simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); @@ -113,12 +114,12 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = { { .description = "One page, with coherent DMA mappings enabled", .order = 0, - .use_dma_alloc = true, + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC, }, { .description = "Above the allocation limit, with coherent DMA mappings enabled", .order = MAX_PAGE_ORDER + 1, - .use_dma_alloc = true, + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC, }, }; @@ -150,12 +151,11 @@ static void ttm_pool_alloc_basic(struct kunit *test) pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc, - false); + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->alloc_flags); KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev); KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE); - KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc); + KUNIT_ASSERT_EQ(test, pool->alloc_flags, params->alloc_flags); err = ttm_pool_alloc(pool, tt, &simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); @@ -165,14 +165,14 @@ static void ttm_pool_alloc_basic(struct kunit *test) last_page = tt->pages[tt->num_pages - 1]; if (params->order <= MAX_PAGE_ORDER) { - if (params->use_dma_alloc) { + if (ttm_pool_uses_dma_alloc(pool)) { KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private); KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private); } else { KUNIT_ASSERT_EQ(test, fst_page->private, params->order); } } else { - if (params->use_dma_alloc) { + if (ttm_pool_uses_dma_alloc(pool)) { KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private); KUNIT_ASSERT_NULL(test, (void *)last_page->private); } else { @@ -218,7 +218,7 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test) pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC); err = ttm_pool_alloc(pool, tt, &simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); @@ -348,7 +348,7 @@ static void ttm_pool_free_dma_alloc(struct kunit *test) pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC); ttm_pool_alloc(pool, tt, &simple_ctx); pt = &pool->caching[caching].orders[order]; @@ -379,7 +379,7 @@ static void ttm_pool_free_no_dma_alloc(struct kunit *test) pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false); + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, 0); ttm_pool_alloc(pool, tt, &simple_ctx); pt = &pool->caching[caching].orders[order]; diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c index e6ea2bd01f07..c0e4e35e0442 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c @@ -207,6 +207,7 @@ static void 
ttm_resource_manager_init_basic(struct kunit *test) struct ttm_resource_test_priv *priv = test->priv; struct ttm_resource_manager *man; size_t size = SZ_16K; + int i; man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, man); @@ -216,8 +217,8 @@ static void ttm_resource_manager_init_basic(struct kunit *test) KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev); KUNIT_ASSERT_EQ(test, man->size, size); KUNIT_ASSERT_EQ(test, man->usage, 0); - KUNIT_ASSERT_NULL(test, man->move); - KUNIT_ASSERT_NOT_NULL(test, &man->move_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) + KUNIT_ASSERT_NULL(test, man->eviction_fences[i]); for (int i = 0; i < TTM_MAX_BO_PRIORITY; ++i) KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i])); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 29423ceeec5c..bd27607f8076 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -31,6 +31,8 @@ #define pr_fmt(fmt) "[TTM] " fmt +#include <drm/drm_print.h> +#include <drm/ttm/ttm_allocation.h> #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_tt.h> @@ -318,18 +320,17 @@ static void ttm_bo_release(struct kref *kref) bo->destroy(bo); } -/** - * ttm_bo_put - * - * @bo: The buffer object. - * - * Unreference a buffer object. - */ +/* TODO: remove! */ void ttm_bo_put(struct ttm_buffer_object *bo) { kref_put(&bo->kref, ttm_bo_release); } -EXPORT_SYMBOL(ttm_bo_put); + +void ttm_bo_fini(struct ttm_buffer_object *bo) +{ + ttm_bo_put(bo); +} +EXPORT_SYMBOL(ttm_bo_fini); static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, @@ -658,34 +659,35 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo) EXPORT_SYMBOL(ttm_bo_unpin); /* - * Add the last move fence to the BO as kernel dependency and reserve a new - * fence slot. + * Add the pipelined eviction fencesto the BO as kernel dependency and reserve new + * fence slots. */ -static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, - struct ttm_resource_manager *man, - bool no_wait_gpu) +static int ttm_bo_add_pipelined_eviction_fences(struct ttm_buffer_object *bo, + struct ttm_resource_manager *man, + bool no_wait_gpu) { struct dma_fence *fence; - int ret; + int i; - spin_lock(&man->move_lock); - fence = dma_fence_get(man->move); - spin_unlock(&man->move_lock); - - if (!fence) - return 0; + spin_lock(&man->eviction_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { + fence = man->eviction_fences[i]; + if (!fence) + continue; - if (no_wait_gpu) { - ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY; - dma_fence_put(fence); - return ret; + if (no_wait_gpu) { + if (!dma_fence_is_signaled(fence)) { + spin_unlock(&man->eviction_lock); + return -EBUSY; + } + } else { + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); + } } + spin_unlock(&man->eviction_lock); - dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); - - ret = dma_resv_reserve_fences(bo->base.resv, 1); - dma_fence_put(fence); - return ret; + /* TODO: this call should be removed. 
*/ + return dma_resv_reserve_fences(bo->base.resv, 1); } /** @@ -718,7 +720,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo, int i, ret; ticket = dma_resv_locking_ctx(bo->base.resv); - ret = dma_resv_reserve_fences(bo->base.resv, 1); + ret = dma_resv_reserve_fences(bo->base.resv, TTM_NUM_MOVE_FENCES); if (unlikely(ret)) return ret; @@ -757,7 +759,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo, return ret; } - ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu); + ret = ttm_bo_add_pipelined_eviction_fences(bo, man, ctx->no_wait_gpu); if (unlikely(ret)) { ttm_resource_free(bo, res); if (ret == -EBUSY) @@ -878,7 +880,8 @@ bounce: /* For backward compatibility with userspace */ if (ret == -ENOSPC) - return -ENOMEM; + return bo->bdev->alloc_flags & TTM_ALLOCATION_PROPAGATE_ENOSPC ? + ret : -ENOMEM; /* * We might need to add a TTM. diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h index 9d8b747a34db..e0d48eac74b0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_internal.h +++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h @@ -55,4 +55,6 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) return bo; } +void ttm_bo_put(struct ttm_buffer_object *bo); + #endif diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index acbbca9d5c92..2ff35d55e462 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -258,7 +258,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); - ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); + ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES); if (ret) { dma_resv_unlock(&fbo->base.base._resv); kfree(fbo); @@ -646,20 +646,44 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, { struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *from; + struct dma_fence *tmp; + int i; from = ttm_manager_type(bdev, bo->resource->mem_type); /** * BO doesn't have a TTM we need to bind/unbind. Just remember - * this eviction and free up the allocation + * this eviction and free up the allocation. + * The fence will be saved in the first free slot or in the slot + * already used to store a fence from the same context. Since + * drivers can't use more than TTM_NUM_MOVE_FENCES contexts for + * evictions we should always find a slot to use. 
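The slot-selection rule above leans on dma_fence context ordering: fences from the same context are totally ordered, so only the later of two same-context fences needs to be remembered, while fences from different contexts each need a slot of their own, which is why callers now reserve TTM_NUM_MOVE_FENCES dma-resv slots. A simplified restatement of the loop, for illustration only (the real code runs under eviction_lock and falls back to a blocking dma_fence_wait() when every slot belongs to a different context):

#include <linux/dma-fence.h>

/* Illustrative restatement of the per-context eviction-fence slot logic. */
static bool store_eviction_fence(struct dma_fence **slots, unsigned int n,
				 struct dma_fence *fence)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!slots[i])
			break;			/* free slot found */
		if (slots[i]->context != fence->context)
			continue;		/* belongs to another context */
		if (!dma_fence_is_later(fence, slots[i]))
			return true;		/* older fence, keep what we have */
		dma_fence_put(slots[i]);	/* newer fence from the same context */
		break;
	}
	if (i == n)
		return false;			/* no slot: caller must wait */

	slots[i] = dma_fence_get(fence);
	return true;
}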
*/ - spin_lock(&from->move_lock); - if (!from->move || dma_fence_is_later(fence, from->move)) { - dma_fence_put(from->move); - from->move = dma_fence_get(fence); + spin_lock(&from->eviction_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { + tmp = from->eviction_fences[i]; + if (!tmp) + break; + if (fence->context != tmp->context) + continue; + if (dma_fence_is_later(fence, tmp)) { + dma_fence_put(tmp); + break; + } + goto unlock; + } + if (i < TTM_NUM_MOVE_FENCES) { + from->eviction_fences[i] = dma_fence_get(fence); + } else { + WARN(1, "not enough fence slots for all fence contexts"); + spin_unlock(&from->eviction_lock); + dma_fence_wait(fence, false); + goto end; } - spin_unlock(&from->move_lock); +unlock: + spin_unlock(&from->eviction_lock); +end: ttm_resource_free(bo, &bo->resource); } diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index c3e2fcbdd2cc..9a51afaf0749 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -31,6 +31,7 @@ #include <linux/export.h> #include <linux/mm.h> +#include <drm/ttm/ttm_allocation.h> #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_device.h> #include <drm/ttm/ttm_tt.h> @@ -198,8 +199,7 @@ EXPORT_SYMBOL(ttm_device_swapout); * @dev: The core kernel device pointer for DMA mappings and allocations. * @mapping: The address space to use for this bo. * @vma_manager: A pointer to a vma manager. - * @use_dma_alloc: If coherent DMA allocation API should be used. - * @use_dma32: If we should use GFP_DMA32 for device memory allocations. + * @alloc_flags: TTM_ALLOCATION_* flags. * * Initializes a struct ttm_device: * Returns: @@ -208,7 +208,7 @@ EXPORT_SYMBOL(ttm_device_swapout); int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs, struct device *dev, struct address_space *mapping, struct drm_vma_offset_manager *vma_manager, - bool use_dma_alloc, bool use_dma32) + unsigned int alloc_flags) { struct ttm_global *glob = &ttm_glob; int ret, nid; @@ -227,6 +227,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func return -ENOMEM; } + bdev->alloc_flags = alloc_flags; bdev->funcs = funcs; ttm_sys_man_init(bdev); @@ -236,7 +237,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func else nid = NUMA_NO_NODE; - ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32); + ttm_pool_init(&bdev->pool, dev, nid, alloc_flags); bdev->vma_manager = vma_manager; spin_lock_init(&bdev->lru_lock); diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index b3fffe7b5062..aa137ead5cc5 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -74,7 +74,8 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp) #endif /* CONFIG_UML */ #endif /* __i386__ || __x86_64__ */ #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ - defined(__powerpc__) || defined(__mips__) || defined(__loongarch__) + defined(__powerpc__) || defined(__mips__) || defined(__loongarch__) || \ + defined(__riscv) if (caching == ttm_write_combined) tmp = pgprot_writecombine(tmp); else diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index baf27c70a419..18b6db015619 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -48,6 +48,7 @@ #include <drm/ttm/ttm_bo.h> #include "ttm_module.h" +#include "ttm_pool_internal.h" #ifdef CONFIG_FAULT_INJECTION #include <linux/fault-inject.h> @@ -135,6 +136,7 @@ static 
DECLARE_RWSEM(pool_shrink_rwsem); static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, unsigned int order) { + const unsigned int beneficial_order = ttm_pool_beneficial_order(pool); unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS; struct ttm_pool_dma *dma; struct page *p; @@ -148,7 +150,14 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_THISNODE; - if (!pool->use_dma_alloc) { + /* + * Do not add latency to the allocation path for allocations orders + * device tolds us do not bring them additional performance gains. + */ + if (beneficial_order && order > beneficial_order) + gfp_flags &= ~__GFP_DIRECT_RECLAIM; + + if (!ttm_pool_uses_dma_alloc(pool)) { p = alloc_pages_node(pool->nid, gfp_flags, order); if (p) p->private = order; @@ -200,7 +209,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching, set_pages_wb(p, 1 << order); #endif - if (!pool || !pool->use_dma_alloc) { + if (!pool || !ttm_pool_uses_dma_alloc(pool)) { __free_pages(p, order); return; } @@ -243,7 +252,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, { dma_addr_t addr; - if (pool->use_dma_alloc) { + if (ttm_pool_uses_dma_alloc(pool)) { struct ttm_pool_dma *dma = (void *)p->private; addr = dma->addr; @@ -265,7 +274,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr, unsigned int num_pages) { /* Unmapped while freeing the page */ - if (pool->use_dma_alloc) + if (ttm_pool_uses_dma_alloc(pool)) return; dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT, @@ -339,7 +348,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, enum ttm_caching caching, unsigned int order) { - if (pool->use_dma_alloc) + if (ttm_pool_uses_dma_alloc(pool)) return &pool->caching[caching].orders[order]; #ifdef CONFIG_X86 @@ -348,7 +357,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, if (pool->nid != NUMA_NO_NODE) return &pool->caching[caching].orders[order]; - if (pool->use_dma32) + if (ttm_pool_uses_dma32(pool)) return &global_dma32_write_combined[order]; return &global_write_combined[order]; @@ -356,7 +365,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, if (pool->nid != NUMA_NO_NODE) return &pool->caching[caching].orders[order]; - if (pool->use_dma32) + if (ttm_pool_uses_dma32(pool)) return &global_dma32_uncached[order]; return &global_uncached[order]; @@ -396,7 +405,7 @@ static unsigned int ttm_pool_shrink(void) /* Return the allocation order based for a page */ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p) { - if (pool->use_dma_alloc) { + if (ttm_pool_uses_dma_alloc(pool)) { struct ttm_pool_dma *dma = (void *)p->private; return dma->vaddr & ~PAGE_MASK; @@ -719,7 +728,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, if (ctx->gfp_retry_mayfail) gfp_flags |= __GFP_RETRY_MAYFAIL; - if (pool->use_dma32) + if (ttm_pool_uses_dma32(pool)) gfp_flags |= GFP_DMA32; else gfp_flags |= GFP_HIGHUSER; @@ -977,7 +986,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt, return -EINVAL; if ((!ttm_backup_bytes_avail() && !flags->purge) || - pool->use_dma_alloc || ttm_tt_is_backed_up(tt)) + ttm_pool_uses_dma_alloc(pool) || ttm_tt_is_backed_up(tt)) return -EBUSY; #ifdef CONFIG_X86 @@ -1014,7 +1023,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt, if (flags->purge) return shrunken; - if 
(pool->use_dma32) + if (ttm_pool_uses_dma32(pool)) gfp = GFP_DMA32; else gfp = GFP_HIGHUSER; @@ -1058,22 +1067,20 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt, * @pool: the pool to initialize * @dev: device for DMA allocations and mappings * @nid: NUMA node to use for allocations - * @use_dma_alloc: true if coherent DMA alloc should be used - * @use_dma32: true if GFP_DMA32 should be used + * @alloc_flags: TTM_ALLOCATION_POOL_* flags * * Initialize the pool and its pool types. */ void ttm_pool_init(struct ttm_pool *pool, struct device *dev, - int nid, bool use_dma_alloc, bool use_dma32) + int nid, unsigned int alloc_flags) { unsigned int i, j; - WARN_ON(!dev && use_dma_alloc); + WARN_ON(!dev && ttm_pool_uses_dma_alloc(pool)); pool->dev = dev; pool->nid = nid; - pool->use_dma_alloc = use_dma_alloc; - pool->use_dma32 = use_dma32; + pool->alloc_flags = alloc_flags; for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { for (j = 0; j < NR_PAGE_ORDERS; ++j) { @@ -1239,7 +1246,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) { unsigned int i; - if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) { + if (!ttm_pool_uses_dma_alloc(pool) && pool->nid == NUMA_NO_NODE) { seq_puts(m, "unused\n"); return 0; } @@ -1250,7 +1257,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { if (!ttm_pool_select_type(pool, i, 0)) continue; - if (pool->use_dma_alloc) + if (ttm_pool_uses_dma_alloc(pool)) seq_puts(m, "DMA "); else seq_printf(m, "N%d ", pool->nid); diff --git a/drivers/gpu/drm/ttm/ttm_pool_internal.h b/drivers/gpu/drm/ttm/ttm_pool_internal.h new file mode 100644 index 000000000000..82c4b7e56a99 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_pool_internal.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* Copyright (c) 2025 Valve Corporation */ + +#ifndef _TTM_POOL_INTERNAL_H_ +#define _TTM_POOL_INTERNAL_H_ + +#include <drm/ttm/ttm_allocation.h> +#include <drm/ttm/ttm_pool.h> + +static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool) +{ + return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC; +} + +static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool) +{ + return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32; +} + +static inline bool ttm_pool_beneficial_order(struct ttm_pool *pool) +{ + return pool->alloc_flags & 0xff; +} + +#endif diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index e2c82ad07eb4..f5aa29dc6ec0 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -34,6 +34,7 @@ #include <drm/ttm/ttm_resource.h> #include <drm/ttm/ttm_tt.h> +#include <drm/drm_print.h> #include <drm/drm_util.h> /* Detach the cursor from the bulk move list*/ @@ -523,14 +524,15 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man, { unsigned i; - spin_lock_init(&man->move_lock); man->bdev = bdev; man->size = size; man->usage = 0; for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) INIT_LIST_HEAD(&man->lru[i]); - man->move = NULL; + spin_lock_init(&man->eviction_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) + man->eviction_fences[i] = NULL; } EXPORT_SYMBOL(ttm_resource_manager_init); @@ -551,7 +553,7 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, .no_wait_gpu = false, }; struct dma_fence *fence; - int ret; + int ret, i; do { ret = ttm_bo_evict_first(bdev, man, &ctx); @@ -561,18 +563,24 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, if (ret && ret != -ENOENT) return ret; - 
spin_lock(&man->move_lock); - fence = dma_fence_get(man->move); - spin_unlock(&man->move_lock); - - if (fence) { - ret = dma_fence_wait(fence, false); - dma_fence_put(fence); - if (ret) - return ret; + ret = 0; + + spin_lock(&man->eviction_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { + fence = man->eviction_fences[i]; + if (fence && !dma_fence_is_signaled(fence)) { + dma_fence_get(fence); + spin_unlock(&man->eviction_lock); + ret = dma_fence_wait(fence, false); + dma_fence_put(fence); + if (ret) + return ret; + spin_lock(&man->eviction_lock); + } } + spin_unlock(&man->eviction_lock); - return 0; + return ret; } EXPORT_SYMBOL(ttm_resource_manager_evict_all); @@ -587,6 +595,9 @@ uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man) { uint64_t usage; + if (WARN_ON_ONCE(!man->bdev)) + return 0; + spin_lock(&man->bdev->lru_lock); usage = man->usage; spin_unlock(&man->bdev->lru_lock); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 506e257dfba8..611d20ab966d 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -40,12 +40,14 @@ #include <linux/shmem_fs.h> #include <drm/drm_cache.h> #include <drm/drm_device.h> +#include <drm/drm_print.h> #include <drm/drm_util.h> #include <drm/ttm/ttm_backup.h> #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_tt.h> #include "ttm_module.h" +#include "ttm_pool_internal.h" static unsigned long ttm_pages_limit; @@ -93,7 +95,8 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) * mapped TT pages need to be decrypted or otherwise the drivers * will end up sending encrypted mem to the gpu. */ - if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { + if (ttm_pool_uses_dma_alloc(&bdev->pool) && + cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { page_flags |= TTM_TT_FLAG_DECRYPTED; drm_info_once(ddev, "TT memory decryption enabled."); } @@ -378,7 +381,7 @@ int ttm_tt_populate(struct ttm_device *bdev, if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { atomic_long_add(ttm->num_pages, &ttm_pages_allocated); - if (bdev->pool.use_dma32) + if (ttm_pool_uses_dma32(&bdev->pool)) atomic_long_add(ttm->num_pages, &ttm_dma32_pages_allocated); } @@ -416,7 +419,7 @@ int ttm_tt_populate(struct ttm_device *bdev, error: if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { atomic_long_sub(ttm->num_pages, &ttm_pages_allocated); - if (bdev->pool.use_dma32) + if (ttm_pool_uses_dma32(&bdev->pool)) atomic_long_sub(ttm->num_pages, &ttm_dma32_pages_allocated); } @@ -439,7 +442,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { atomic_long_sub(ttm->num_pages, &ttm_pages_allocated); - if (bdev->pool.use_dma32) + if (ttm_pool_uses_dma32(&bdev->pool)) atomic_long_sub(ttm->num_pages, &ttm_dma32_pages_allocated); } diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index 37bdd976ae59..26b6c65ef6fd 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -21,6 +21,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "tve200_drm.h" diff --git a/drivers/gpu/drm/udl/udl_edid.c b/drivers/gpu/drm/udl/udl_edid.c index 12f48ae17073..af4cff2a7c51 100644 --- a/drivers/gpu/drm/udl/udl_edid.c +++ b/drivers/gpu/drm/udl/udl_edid.c @@ -4,6 +4,7 @@ #include <drm/drm_drv.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include 
"udl_drv.h" #include "udl_edid.h" diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index c41476ddde68..d9547f5117b9 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -18,6 +18,8 @@ #include <linux/dma-buf.h> #include <linux/vmalloc.h> +#include <drm/drm_print.h> + #include "v3d_drv.h" #include "uapi/drm/v3d_drm.h" diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index 7e789e181af0..89f24eec62a7 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -8,6 +8,7 @@ #include <linux/string_helpers.h> #include <drm/drm_debugfs.h> +#include <drm/drm_print.h> #include "v3d_drv.h" #include "v3d_regs.h" diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index c5a3bbbc74c5..e8a46c8bad8a 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -25,6 +25,7 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <uapi/drm/v3d_drm.h> #include "v3d_drv.h" diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index bb110d35f749..5a180dc6c452 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -11,6 +11,7 @@ #include <linux/uaccess.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "v3d_drv.h" #include "v3d_regs.h" diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c index c1a30166c099..bf351fc0d488 100644 --- a/drivers/gpu/drm/v3d/v3d_gemfs.c +++ b/drivers/gpu/drm/v3d/v3d_gemfs.c @@ -5,6 +5,8 @@ #include <linux/mount.h> #include <linux/fs_context.h> +#include <drm/drm_print.h> + #include "v3d_drv.h" void v3d_gemfs_init(struct v3d_dev *v3d) diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 31ecc5b4ba5a..b55880fd6c50 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -16,6 +16,8 @@ #include <linux/platform_device.h> #include <linux/sched/clock.h> +#include <drm/drm_print.h> + #include "v3d_drv.h" #include "v3d_regs.h" #include "v3d_trace.h" diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 0ec06bfbbebb..0867250db7a6 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -21,6 +21,7 @@ #include <linux/sched/clock.h> #include <linux/kthread.h> +#include <drm/drm_print.h> #include <drm/drm_syncobj.h> #include "v3d_drv.h" diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index f3652e90683c..7de5a95ee7ca 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -4,6 +4,7 @@ * Copyright (C) 2023 Raspberry Pi */ +#include <drm/drm_print.h> #include <drm/drm_syncobj.h> #include "v3d_drv.h" diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c index 903a6c48ee8b..37c66668df57 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_irq.c +++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c @@ -12,6 +12,7 @@ #include <linux/pci.h> #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "vbox_drv.h" diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index 7f686a0190e6..aa6664542b20 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -12,6 +12,7 @@ #include <linux/vbox_err.h> #include <drm/drm_damage_helper.h> +#include <drm/drm_print.h> #include "vbox_drv.h" #include "vboxvideo_guest.h" diff --git 
a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index 9ff3bade9795..d363c3f0afdf 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -22,6 +22,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "hgsmi_channels.h" @@ -262,8 +263,8 @@ static int vbox_primary_atomic_check(struct drm_plane *plane, struct drm_crtc_state *crtc_state = NULL; if (new_state->crtc) { - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; } @@ -344,8 +345,8 @@ static int vbox_cursor_atomic_check(struct drm_plane *plane, int ret; if (new_state->crtc) { - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; } diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c index dc24c2172fd4..19bf8d023dc8 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c +++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c @@ -8,6 +8,7 @@ */ #include <linux/pci.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include "vbox_drv.h" int vbox_mm_init(struct vbox_private *vbox) diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 123ab0ce1781..bb8c40be3250 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig @@ -35,6 +35,7 @@ config DRM_VC4_HDMI_CEC bool "Broadcom VC4 HDMI CEC Support" depends on DRM_VC4 select CEC_CORE + select DRM_DISPLAY_HDMI_CEC_HELPER help Choose this option if you have a Broadcom VC4 GPU and want to use CEC. 
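
The vbox_mode.c hunks above (and the vc4_plane.c ones further down) replace drm_atomic_get_existing_crtc_state() with drm_atomic_get_new_crtc_state(): when a plane's new state points at a CRTC, that CRTC's new state is expected to already be part of the same commit (hence the WARN_ON), and the "new state" accessor is the current way to look it up. A minimal sketch of the pattern, using hypothetical my_* names and the generic clipping helper rather than either driver's actual checks:

	#include <drm/drm_atomic.h>
	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_plane.h>

	static int my_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
	{
		struct drm_plane_state *new_state =
			drm_atomic_get_new_plane_state(state, plane);
		struct drm_crtc_state *crtc_state;

		if (!new_state->crtc)
			return 0;

		/* The CRTC state is already part of this commit. */
		crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
		if (WARN_ON(!crtc_state))
			return -EINVAL;

		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
							   DRM_PLANE_NO_SCALING,
							   DRM_PLANE_NO_SCALING,
							   false, true);
	}
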
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 4aaa587be3a5..46b4474ac41d 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -19,6 +19,7 @@ #include <linux/dma-buf.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include "vc4_drv.h" #include "uapi/drm/vc4_drm.h" diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c index fac624a663ea..e765904e13f3 100644 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c @@ -4,6 +4,7 @@ */ #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include <linux/seq_file.h> #include <linux/circ_buf.h> diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 960550c166d9..2afc88394d64 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -17,6 +17,7 @@ #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <linux/clk.h> diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index c7cb1e3a6434..3846996f9028 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -36,6 +36,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <soc/bcm2835/raspberrypi-firmware.h> diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 458e5d987964..deeeaebc702f 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -36,6 +36,7 @@ #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 255e5817618e..ab16164b5eda 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -30,6 +30,7 @@ #include <linux/dma-fence-array.h> #include <drm/drm_exec.h> +#include <drm/drm_print.h> #include <drm/drm_syncobj.h> #include "uapi/drm/vc4_drm.h" diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 07c91b450f93..1798d1156d10 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -32,12 +32,14 @@ */ #include <drm/display/drm_hdmi_audio_helper.h> +#include <drm/display/drm_hdmi_cec_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/display/drm_hdmi_state_helper.h> #include <drm/display/drm_scdc_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <linux/clk.h> @@ -375,14 +377,6 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi, drm_atomic_helper_connector_hdmi_hotplug(connector, status); - if (status == connector_status_disconnected) { - cec_phys_addr_invalidate(vc4_hdmi->cec_adap); - return; - } - - cec_s_phys_addr(vc4_hdmi->cec_adap, - connector->display_info.source_physical_address, false); - if (status != connector_status_connected) return; @@ -2384,8 +2378,8 @@ static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv) struct vc4_hdmi *vc4_hdmi = priv; if (vc4_hdmi->cec_rx_msg.len) - cec_received_msg(vc4_hdmi->cec_adap, - &vc4_hdmi->cec_rx_msg); + drm_connector_hdmi_cec_received_msg(&vc4_hdmi->connector, + 
&vc4_hdmi->cec_rx_msg); return IRQ_HANDLED; } @@ -2395,15 +2389,17 @@ static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv) struct vc4_hdmi *vc4_hdmi = priv; if (vc4_hdmi->cec_tx_ok) { - cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK, - 0, 0, 0, 0); + drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector, + CEC_TX_STATUS_OK, + 0, 0, 0, 0); } else { /* * This CEC implementation makes 1 retry, so if we * get a NACK, then that means it made 2 attempts. */ - cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK, - 0, 2, 0, 0); + drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector, + CEC_TX_STATUS_NACK, + 0, 2, 0, 0); } return IRQ_HANDLED; } @@ -2560,9 +2556,9 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv) return ret; } -static int vc4_hdmi_cec_enable(struct cec_adapter *adap) +static int vc4_hdmi_cec_enable(struct drm_connector *connector) { - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct drm_device *drm = vc4_hdmi->connector.dev; /* clock period in microseconds */ const u32 usecs = 1000000 / CEC_CLOCK_FREQ; @@ -2627,9 +2623,9 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap) return 0; } -static int vc4_hdmi_cec_disable(struct cec_adapter *adap) +static int vc4_hdmi_cec_disable(struct drm_connector *connector) { - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct drm_device *drm = vc4_hdmi->connector.dev; unsigned long flags; int idx; @@ -2663,17 +2659,17 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap) return 0; } -static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) +static int vc4_hdmi_cec_adap_enable(struct drm_connector *connector, bool enable) { if (enable) - return vc4_hdmi_cec_enable(adap); + return vc4_hdmi_cec_enable(connector); else - return vc4_hdmi_cec_disable(adap); + return vc4_hdmi_cec_disable(connector); } -static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) +static int vc4_hdmi_cec_adap_log_addr(struct drm_connector *connector, u8 log_addr) { - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct drm_device *drm = vc4_hdmi->connector.dev; unsigned long flags; int idx; @@ -2699,10 +2695,10 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) return 0; } -static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, +static int vc4_hdmi_cec_adap_transmit(struct drm_connector *connector, u8 attempts, u32 signal_free_time, struct cec_msg *msg) { - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct drm_device *dev = vc4_hdmi->connector.dev; unsigned long flags; u32 val; @@ -2745,84 +2741,65 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, return 0; } -static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = { - .adap_enable = vc4_hdmi_cec_adap_enable, - .adap_log_addr = vc4_hdmi_cec_adap_log_addr, - .adap_transmit = vc4_hdmi_cec_adap_transmit, -}; - -static void vc4_hdmi_cec_release(void *ptr) -{ - struct vc4_hdmi *vc4_hdmi = ptr; - - cec_unregister_adapter(vc4_hdmi->cec_adap); - vc4_hdmi->cec_adap = NULL; -} - -static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) +static int vc4_hdmi_cec_init(struct drm_connector *connector) { - struct cec_connector_info conn_info; + struct vc4_hdmi *vc4_hdmi = 
connector_to_vc4_hdmi(connector); struct platform_device *pdev = vc4_hdmi->pdev; struct device *dev = &pdev->dev; int ret; - if (!of_property_present(dev->of_node, "interrupts")) { - dev_warn(dev, "'interrupts' DT property is missing, no CEC\n"); - return 0; - } - - vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, - vc4_hdmi, - vc4_hdmi->variant->card_name, - CEC_CAP_DEFAULTS | - CEC_CAP_CONNECTOR_INFO, 1); - ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap); - if (ret < 0) - return ret; - - cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector); - cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info); - if (vc4_hdmi->variant->external_irq_controller) { ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"), vc4_cec_irq_handler_rx_bare, vc4_cec_irq_handler_rx_thread, 0, "vc4 hdmi cec rx", vc4_hdmi); if (ret) - goto err_delete_cec_adap; + return ret; ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"), vc4_cec_irq_handler_tx_bare, vc4_cec_irq_handler_tx_thread, 0, "vc4 hdmi cec tx", vc4_hdmi); if (ret) - goto err_delete_cec_adap; + return ret; } else { ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), vc4_cec_irq_handler, vc4_cec_irq_handler_thread, 0, "vc4 hdmi cec", vc4_hdmi); if (ret) - goto err_delete_cec_adap; + return ret; } - ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev); - if (ret < 0) - goto err_delete_cec_adap; + return 0; +} + +static const struct drm_connector_hdmi_cec_funcs vc4_hdmi_cec_funcs = { + .init = vc4_hdmi_cec_init, + .enable = vc4_hdmi_cec_adap_enable, + .log_addr = vc4_hdmi_cec_adap_log_addr, + .transmit = vc4_hdmi_cec_adap_transmit, +}; + +static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi) +{ + struct platform_device *pdev = vc4_hdmi->pdev; + struct device *dev = &pdev->dev; + + if (!of_property_present(dev->of_node, "interrupts")) { + dev_warn(dev, "'interrupts' DT property is missing, no CEC\n"); + return 0; + } /* - * NOTE: Strictly speaking, we should probably use a DRM-managed - * registration there to avoid removing the CEC adapter by the - * time the DRM driver doesn't have any user anymore. + * NOTE: the CEC adapter will be unregistered by drmm cleanup from + * drm_managed_release(), which is called from drm_dev_release() + * during device unbind. * * However, the CEC framework already cleans up the CEC adapter * only when the last user has closed its file descriptor, so we * don't need to handle it in DRM. * - * By the time the device-managed hook is executed, we will give - * up our reference to the CEC adapter and therefore don't - * really care when it's actually freed. - * * There's still a problematic sequence: if we unregister our * CEC adapter, but the userspace keeps a handle on the CEC * adapter but not the DRM device for some reason. In such a @@ -2833,19 +2810,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) * the CEC framework already handles this too, by calling * cec_is_registered() in cec_ioctl() and cec_poll(). 
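
The vc4 hunks around here move CEC handling onto the new drm_connector_hdmi_cec helpers: the driver keeps only its low-level init/enable/log_addr/transmit hooks plus IRQ reporting, while adapter creation, connector info and teardown are handled by the helper. A condensed sketch of the resulting shape; the my_* names are placeholders and the call signature follows the vc4 conversion below:

	/* Placeholder hooks; prototypes mirror the vc4 ones in this patch. */
	static int my_cec_init(struct drm_connector *connector);
	static int my_cec_enable(struct drm_connector *connector, bool enable);
	static int my_cec_log_addr(struct drm_connector *connector, u8 log_addr);
	static int my_cec_transmit(struct drm_connector *connector, u8 attempts,
				   u32 signal_free_time, struct cec_msg *msg);

	static const struct drm_connector_hdmi_cec_funcs my_cec_funcs = {
		.init	  = my_cec_init,
		.enable	  = my_cec_enable,
		.log_addr = my_cec_log_addr,
		.transmit = my_cec_transmit,
	};

	struct my_hdmi {
		struct drm_connector connector;
	};

	static int my_hdmi_cec_register(struct my_hdmi *hdmi, struct device *dev)
	{
		/* One logical address; teardown happens via drmm cleanup. */
		return drmm_connector_hdmi_cec_register(&hdmi->connector,
							&my_cec_funcs,
							"my-hdmi-cec", 1, dev);
	}

Completed receives and transmits are then reported from the IRQ threads with drm_connector_hdmi_cec_received_msg() and drm_connector_hdmi_cec_transmit_done(), as in the handlers converted earlier in this file.
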
*/ - ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi); - if (ret) - return ret; - - return 0; - -err_delete_cec_adap: - cec_delete_adapter(vc4_hdmi->cec_adap); - - return ret; + return drmm_connector_hdmi_cec_register(&vc4_hdmi->connector, + &vc4_hdmi_cec_funcs, + vc4_hdmi->variant->card_name, + 1, + &pdev->dev); } #else -static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) +static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi) { return 0; } @@ -3250,7 +3222,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) if (ret) goto err_put_runtime_pm; - ret = vc4_hdmi_cec_init(vc4_hdmi); + ret = vc4_hdmi_cec_register(vc4_hdmi); if (ret) goto err_put_runtime_pm; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index a31157c99bee..8d069718df00 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -147,7 +147,6 @@ struct vc4_hdmi { */ bool disable_wifi_frequencies; - struct cec_adapter *cec_adap; struct cec_msg cec_rx_msg; bool cec_tx_ok; bool cec_irq_was_rx; diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 4811d794001f..ee8d0738501b 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -26,6 +26,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <soc/bcm2835/raspberrypi-firmware.h> diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 69b399f3b802..63e88f90eef7 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -48,6 +48,7 @@ #include <linux/platform_device.h> #include <drm/drm_drv.h> +#include <drm/drm_print.h> #include "vc4_drv.h" #include "vc4_regs.h" diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 8f983edb81ff..e563c1210937 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -19,6 +19,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index f1342f917cf7..1ac80c0b258f 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -9,6 +9,8 @@ * The V3D block provides 16 hardware counters which can count various events. 
*/ +#include <drm/drm_print.h> + #include "vc4_drv.h" #include "vc4_regs.h" diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 056d344c5411..f00d4076ba07 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -24,6 +24,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> #include "uapi/drm/vc4_drm.h" @@ -497,8 +498,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) u32 v_subsample = fb->format->vsub; int ret; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); if (!crtc_state) { DRM_DEBUG_KMS("Invalid crtc state\n"); return -EINVAL; @@ -875,8 +875,7 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) unsigned int vscale_factor; vc4_state = to_vc4_plane_state(state); - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode); /* The HVS is able to process 2 pixels/cycle when scaling the source, diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 14079853338e..edc471e71c0e 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -35,6 +35,8 @@ * actually fairly low. */ +#include <drm/drm_print.h> + #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" #include "vc4_packet.h" diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index 42acac05fe47..9082902100e4 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -21,6 +21,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include <drm/drm_writeback.h> diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index bb09df5000bd..3ffe09bc89d2 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -10,6 +10,8 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <drm/drm_print.h> + #include "vc4_drv.h" #include "vc4_regs.h" diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index 1e7bdda55698..545c4c3608f5 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -43,6 +43,8 @@ * to use) happens. */ +#include <drm/drm_print.h> + #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" #include "vc4_packet.h" diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 2d74e786914c..b50b6cdac3f4 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -41,6 +41,8 @@ * this validation is only performed at BO creation time. 
*/ +#include <drm/drm_print.h> + #include "vc4_drv.h" #include "vc4_qpu_defines.h" diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c index 06d702e879b0..b84fad2a5b23 100644 --- a/drivers/gpu/drm/vc4/vc4_vec.c +++ b/drivers/gpu/drm/vc4/vc4_vec.c @@ -17,6 +17,7 @@ #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_panel.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <linux/clk.h> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index fd76730fd38c..07db319c3d7f 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -79,7 +79,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile, dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1); - timer_setup(&fence->timer, vgem_fence_timeout, 0); + timer_setup(&fence->timer, vgem_fence_timeout, TIMER_IRQSAFE); /* We force the fence to expire within 10s to prevent driver hangs */ mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT); diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index 853dd9aa397e..3a68a16b58ae 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -27,6 +27,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include "virtgpu_drv.h" diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index c3315935d8bc..6a962c1d6e95 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -30,8 +30,11 @@ #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #include "virtgpu_drv.h" @@ -55,6 +58,7 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = { @@ -99,6 +103,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc) static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { + drm_crtc_vblank_on(crtc); } static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, @@ -108,6 +113,8 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); + drm_crtc_vblank_off(crtc); + virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0); virtio_gpu_notify(vgdev); } @@ -121,9 +128,10 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, - crtc); + struct drm_device *dev = crtc->dev; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); + struct drm_pending_vblank_event *event; /* * virtio-gpu can't do modeset and plane update operations @@ -133,6 +141,20 @@ 
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, */ if (drm_atomic_crtc_needs_modeset(crtc_state)) output->needs_modeset = true; + + spin_lock_irq(&dev->event_lock); + + event = crtc_state->event; + crtc_state->event = NULL; + + if (event) { + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + } + + spin_unlock_irq(&dev->event_lock); } static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { @@ -257,6 +279,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) struct drm_encoder *encoder = &output->enc; struct drm_crtc *crtc = &output->crtc; struct drm_plane *primary, *cursor; + int ret; output->index = index; if (index == 0) { @@ -271,8 +294,10 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index); if (IS_ERR(cursor)) return PTR_ERR(cursor); - drm_crtc_init_with_planes(dev, crtc, primary, cursor, - &virtio_gpu_crtc_funcs, NULL); + ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, + &virtio_gpu_crtc_funcs, NULL); + if (ret) + return ret; drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, @@ -356,6 +381,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) for (i = 0 ; i < vgdev->num_scanouts; ++i) vgdev_output_init(vgdev, i); + ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts); + if (ret) + return ret; + drm_mode_config_reset(vgdev->ddev); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 71c6ccad4b99..a5ce96fb8a1d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -39,6 +39,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include "virtgpu_drv.h" diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 1c15cbf326b7..f3594695bb82 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -29,6 +29,7 @@ #include <drm/drm_file.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "virtgpu_drv.h" diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index e6363c887500..4270bfede7b9 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -26,6 +26,8 @@ #include <linux/dma-mapping.h> #include <linux/moduleparam.h> +#include <drm/drm_print.h> + #include "virtgpu_drv.h" static int virtio_gpu_virglrenderer_workaround = 1; diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 29e4b458ae57..a7863f8ee4ee 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -30,6 +30,7 @@ #include <linux/virtio_dma_buf.h> #include <drm/drm_managed.h> #include <drm/drm_panic.h> +#include <drm/drm_print.h> #include "virtgpu_drv.h" diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 8181b22b9b46..0c194b4e9488 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -32,6 +32,7 @@ #include <linux/virtio_ring.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include "virtgpu_drv.h" #include "virtgpu_trace.h" diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig index 
3c02f928ffe6..3977bbb99f7d 100644 --- a/drivers/gpu/drm/vkms/Kconfig +++ b/drivers/gpu/drm/vkms/Kconfig @@ -7,6 +7,7 @@ config DRM_VKMS select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select CRC32 + select CONFIGFS_FS default n help Virtual Kernel Mode-Setting (VKMS) is used for testing or for diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index d657865e573f..9bb264091c38 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -8,7 +8,10 @@ vkms-y := \ vkms_composer.o \ vkms_writeback.o \ vkms_connector.o \ - vkms_config.o + vkms_config.o \ + vkms_configfs.o \ + vkms_colorop.o \ + vkms_luts.o obj-$(CONFIG_DRM_VKMS) += vkms.o obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile index 5750f0bd9d40..d4d9ba8d4c54 100644 --- a/drivers/gpu/drm/vkms/tests/Makefile +++ b/drivers/gpu/drm/vkms/tests/Makefile @@ -2,6 +2,7 @@ vkms-kunit-tests-y := \ vkms_config_test.o \ - vkms_format_test.o + vkms_format_test.o \ + vkms_color_test.o obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms-kunit-tests.o diff --git a/drivers/gpu/drm/vkms/tests/vkms_color_test.c b/drivers/gpu/drm/vkms/tests/vkms_color_test.c new file mode 100644 index 000000000000..1a1c7cac2f15 --- /dev/null +++ b/drivers/gpu/drm/vkms/tests/vkms_color_test.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <kunit/test.h> + +#include <drm/drm_fixed.h> +#include <drm/drm_mode.h> +#include "../vkms_composer.h" +#include "../vkms_drv.h" +#include "../vkms_luts.h" + +#define TEST_LUT_SIZE 16 + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static struct drm_color_lut test_linear_array[TEST_LUT_SIZE] = { + { 0x0, 0x0, 0x0, 0 }, + { 0x1111, 0x1111, 0x1111, 0 }, + { 0x2222, 0x2222, 0x2222, 0 }, + { 0x3333, 0x3333, 0x3333, 0 }, + { 0x4444, 0x4444, 0x4444, 0 }, + { 0x5555, 0x5555, 0x5555, 0 }, + { 0x6666, 0x6666, 0x6666, 0 }, + { 0x7777, 0x7777, 0x7777, 0 }, + { 0x8888, 0x8888, 0x8888, 0 }, + { 0x9999, 0x9999, 0x9999, 0 }, + { 0xaaaa, 0xaaaa, 0xaaaa, 0 }, + { 0xbbbb, 0xbbbb, 0xbbbb, 0 }, + { 0xcccc, 0xcccc, 0xcccc, 0 }, + { 0xdddd, 0xdddd, 0xdddd, 0 }, + { 0xeeee, 0xeeee, 0xeeee, 0 }, + { 0xffff, 0xffff, 0xffff, 0 }, +}; + +/* lerp test parameters */ +struct vkms_color_test_lerp_params { + s64 t; + __u16 a; + __u16 b; + __u16 expected; +}; + +/* lerp test cases */ +static const struct vkms_color_test_lerp_params color_test_lerp_cases[] = { + /* Half-way round down */ + { 0x80000000 - 1, 0x0, 0x10, 0x8 }, + { 0x80000000 - 1, 0x1, 0x10, 0x8 }, /* Odd a */ + { 0x80000000 - 1, 0x1, 0xf, 0x8 }, /* Odd b */ + { 0x80000000 - 1, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x80000000 - 1, 0x10, 0x11, 0x10 }, /* b = a + 1*/ + /* Half-way round up */ + { 0x80000000, 0x0, 0x10, 0x8 }, + { 0x80000000, 0x1, 0x10, 0x9 }, /* Odd a */ + { 0x80000000, 0x1, 0xf, 0x8 }, /* Odd b */ + { 0x80000000, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x80000000, 0x10, 0x11, 0x11 }, /* b = a + 1*/ + /* t = 0.0 */ + { 0x0, 0x0, 0x10, 0x0 }, + { 0x0, 0x1, 0x10, 0x1 }, /* Odd a */ + { 0x0, 0x1, 0xf, 0x1 }, /* Odd b */ + { 0x0, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x0, 0x10, 0x11, 0x10 }, /* b = a + 1*/ + /* t = 1.0 */ + { 0x100000000, 0x0, 0x10, 0x10 }, + { 0x100000000, 0x1, 0x10, 0x10 }, /* Odd a */ + { 0x100000000, 0x1, 0xf, 0xf }, /* Odd b */ + { 0x100000000, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x100000000, 0x10, 0x11, 0x11 }, /* b = a + 1*/ + /* t = 0.0 + 1 */ + { 0x0 + 1, 0x0, 0x10, 0x0 }, + { 0x0 + 1, 0x1, 0x10, 0x1 }, /* Odd a */ + { 0x0 + 1, 0x1, 
0xf, 0x1 }, /* Odd b */ + { 0x0 + 1, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x0 + 1, 0x10, 0x11, 0x10 }, /* b = a + 1*/ + /* t = 1.0 - 1 */ + { 0x100000000 - 1, 0x0, 0x10, 0x10 }, + { 0x100000000 - 1, 0x1, 0x10, 0x10 }, /* Odd a */ + { 0x100000000 - 1, 0x1, 0xf, 0xf }, /* Odd b */ + { 0x100000000 - 1, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x100000000 - 1, 0x10, 0x11, 0x11 }, /* b = a + 1*/ + /* t chosen to verify the flipping point of result a (or b) to a+1 (or b-1) */ + { 0x80000000 - 1, 0x0, 0x1, 0x0 }, + { 0x80000000, 0x0, 0x1, 0x1 }, +}; + +static const struct vkms_color_lut test_linear_lut = { + .base = test_linear_array, + .lut_length = TEST_LUT_SIZE, + .channel_value2index_ratio = 0xf000fll +}; + +static void vkms_color_test_get_lut_index(struct kunit *test) +{ + s64 lut_index; + int i; + + lut_index = get_lut_index(&test_linear_lut, test_linear_array[0].red); + KUNIT_EXPECT_EQ(test, drm_fixp2int(lut_index), 0); + + for (i = 0; i < TEST_LUT_SIZE; i++) { + lut_index = get_lut_index(&test_linear_lut, test_linear_array[i].red); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(lut_index), i); + } + + KUNIT_EXPECT_EQ(test, drm_fixp2int(get_lut_index(&srgb_eotf, 0x0)), 0x0); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x0)), 0x0); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x101)), 0x1); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x202)), 0x2); + + KUNIT_EXPECT_EQ(test, drm_fixp2int(get_lut_index(&srgb_inv_eotf, 0x0)), 0x0); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x0)), 0x0); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x101)), 0x1); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x202)), 0x2); + + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0xfefe)), 0xfe); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0xffff)), 0xff); +} + +static void vkms_color_test_lerp(struct kunit *test) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(color_test_lerp_cases); i++) { + const struct vkms_color_test_lerp_params *params = &color_test_lerp_cases[i]; + + KUNIT_EXPECT_EQ(test, lerp_u16(params->a, params->b, params->t), params->expected); + } +} + +static void vkms_color_test_linear(struct kunit *test) +{ + for (int i = 0; i < LUT_SIZE; i++) { + int linear = apply_lut_to_channel_value(&linear_eotf, i * 0x101, LUT_RED); + + KUNIT_EXPECT_EQ(test, DIV_ROUND_CLOSEST(linear, 0x101), i); + } +} + +static void vkms_color_srgb_inv_srgb(struct kunit *test) +{ + u16 srgb, final; + + for (int i = 0; i < LUT_SIZE; i++) { + srgb = apply_lut_to_channel_value(&srgb_eotf, i * 0x101, LUT_RED); + final = apply_lut_to_channel_value(&srgb_inv_eotf, srgb, LUT_RED); + + KUNIT_EXPECT_GE(test, final / 0x101, i - 1); + KUNIT_EXPECT_LE(test, final / 0x101, i + 1); + } +} + +#define FIXPT_HALF (DRM_FIXED_ONE >> 1) +#define FIXPT_QUARTER (DRM_FIXED_ONE >> 2) + +static const struct drm_color_ctm_3x4 test_matrix_3x4_50_desat = { { + FIXPT_HALF, FIXPT_QUARTER, FIXPT_QUARTER, 0, + FIXPT_QUARTER, FIXPT_HALF, FIXPT_QUARTER, 0, + FIXPT_QUARTER, FIXPT_QUARTER, FIXPT_HALF, 0 +} }; + +static void vkms_color_ctm_3x4_50_desat(struct kunit *test) +{ + struct pixel_argb_s32 ref, out; + + /* full white */ + ref.a = 0xffff; + ref.r = 0xffff; + ref.g = 0xffff; + ref.b = 0xffff; + + memcpy(&out, &ref, sizeof(out)); + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); + + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); + + /* full black */ + ref.a = 0xffff; + ref.r = 
0x0; + ref.g = 0x0; + ref.b = 0x0; + + memcpy(&out, &ref, sizeof(out)); + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); + + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); + + /* 50% grey */ + ref.a = 0xffff; + ref.r = 0x8000; + ref.g = 0x8000; + ref.b = 0x8000; + + memcpy(&out, &ref, sizeof(out)); + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); + + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); + + /* full red to 50% desat */ + ref.a = 0xffff; + ref.r = 0x8000; + ref.g = 0x4000; + ref.b = 0x4000; + + out.a = 0xffff; + out.r = 0xffff; + out.g = 0x0; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); + + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); +} + +/* + * BT.709 encoding matrix + * + * Values printed from within IGT when converting + * igt_matrix_3x4_bt709_enc to the fixed-point format expected + * by DRM/KMS. + */ +static const struct drm_color_ctm_3x4 test_matrix_3x4_bt709_enc = { { + 0x00000000366cf400ull, 0x00000000b7175900ull, 0x0000000127bb300ull, 0, + 0x800000001993b3a0ull, 0x800000005609fe80ull, 0x000000006f9db200ull, 0, + 0x000000009d70a400ull, 0x800000008f011100ull, 0x800000000e6f9330ull, 0 +} }; + +static void vkms_color_ctm_3x4_bt709(struct kunit *test) +{ + struct pixel_argb_s32 out; + + /* full white to bt709 */ + out.a = 0xffff; + out.r = 0xffff; + out.g = 0xffff; + out.b = 0xffff; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 255 */ + KUNIT_EXPECT_GT(test, out.r, 0xfe00); + KUNIT_EXPECT_LT(test, out.r, 0x10000); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* full black to bt709 */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0x0; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 0 */ + KUNIT_EXPECT_LT(test, out.r, 0x100); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* gray to bt709 */ + out.a = 0xffff; + out.r = 0x7fff; + out.g = 0x7fff; + out.b = 0x7fff; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 127 */ + KUNIT_EXPECT_GT(test, out.r, 0x7e00); + KUNIT_EXPECT_LT(test, out.r, 0x8000); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* == red 255 - bt709 enc == */ + out.a = 0xffff; + out.r = 0xffff; + out.g = 0x0; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 54 */ + KUNIT_EXPECT_GT(test, out.r, 0x3500); + KUNIT_EXPECT_LT(test, out.r, 0x3700); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 157 */ + KUNIT_EXPECT_GT(test, out.b, 0x9C00); + KUNIT_EXPECT_LT(test, out.b, 0x9E00); + + /* == green 255 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0xffff; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 182 */ + KUNIT_EXPECT_GT(test, out.r, 0xB500); + KUNIT_EXPECT_LT(test, out.r, 0xB780); /* laxed by half*/ + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* == blue 255 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0x0; + out.b = 0xffff; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 18 */ + KUNIT_EXPECT_GT(test, out.r, 0x1100); + KUNIT_EXPECT_LT(test, out.r, 0x1300); + + /* U 111 */ + KUNIT_EXPECT_GT(test, out.g, 0x6E00); + KUNIT_EXPECT_LT(test, out.g, 0x7000); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* == red 140 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x8c8c; 
+ out.g = 0x0; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 30 */ + KUNIT_EXPECT_GT(test, out.r, 0x1D00); + KUNIT_EXPECT_LT(test, out.r, 0x1F00); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x100); + + /* V 87 */ + KUNIT_EXPECT_GT(test, out.b, 0x5600); + KUNIT_EXPECT_LT(test, out.b, 0x5800); + + /* == green 140 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0x8c8c; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 30 */ + KUNIT_EXPECT_GT(test, out.r, 0x6400); + KUNIT_EXPECT_LT(test, out.r, 0x6600); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x100); + + /* == blue 140 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0x0; + out.b = 0x8c8c; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 30 */ + KUNIT_EXPECT_GT(test, out.r, 0x900); + KUNIT_EXPECT_LT(test, out.r, 0xB00); + + /* U 61 */ + KUNIT_EXPECT_GT(test, out.g, 0x3C00); + KUNIT_EXPECT_LT(test, out.g, 0x3E00); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x100); +} + +static struct kunit_case vkms_color_test_cases[] = { + KUNIT_CASE(vkms_color_test_get_lut_index), + KUNIT_CASE(vkms_color_test_lerp), + KUNIT_CASE(vkms_color_test_linear), + KUNIT_CASE(vkms_color_srgb_inv_srgb), + KUNIT_CASE(vkms_color_ctm_3x4_50_desat), + KUNIT_CASE(vkms_color_ctm_3x4_bt709), + {} +}; + +static struct kunit_suite vkms_color_test_suite = { + .name = "vkms-color", + .test_cases = vkms_color_test_cases, +}; + +kunit_test_suite(vkms_color_test_suite); + +MODULE_DESCRIPTION("Kunit test for VKMS LUT handling"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index b0d78a81d2df..1e4ea1863420 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -83,6 +83,7 @@ struct default_config_case { bool enable_cursor; bool enable_writeback; bool enable_overlay; + bool enable_plane_pipeline; }; static void vkms_config_test_empty_config(struct kunit *test) @@ -108,14 +109,22 @@ static void vkms_config_test_empty_config(struct kunit *test) } static struct default_config_case default_config_cases[] = { - { false, false, false }, - { true, false, false }, - { true, true, false }, - { true, false, true }, - { false, true, false }, - { false, true, true }, - { false, false, true }, - { true, true, true }, + { false, false, false, false }, + { true, false, false, false }, + { true, true, false, false }, + { true, false, true, false }, + { false, true, false, false }, + { false, true, true, false }, + { false, false, true, false }, + { true, true, true, false }, + { false, false, false, true }, + { true, false, false, true }, + { true, true, false, true }, + { true, false, true, true }, + { false, true, false, true }, + { false, true, true, true }, + { false, false, true, true }, + { true, true, true, true }, }; KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL); @@ -132,11 +141,15 @@ static void vkms_config_test_default_config(struct kunit *test) config = vkms_config_default_create(params->enable_cursor, params->enable_writeback, - params->enable_overlay); + params->enable_overlay, + params->enable_plane_pipeline); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Planes */ vkms_config_for_each_plane(config, plane_cfg) { + KUNIT_EXPECT_EQ(test, + vkms_config_plane_get_default_pipeline(plane_cfg), + params->enable_plane_pipeline); switch 
(vkms_config_plane_get_type(plane_cfg)) { case DRM_PLANE_TYPE_PRIMARY: n_primaries++; @@ -368,7 +381,7 @@ static void vkms_config_test_invalid_plane_number(struct kunit *test) struct vkms_config_plane *plane_cfg; int n; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Invalid: No planes */ @@ -393,7 +406,7 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) struct vkms_config_encoder *encoder_cfg; int err; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); plane_cfg = get_first_plane(config); @@ -474,7 +487,7 @@ static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test) struct vkms_config_plane *plane_cfg; struct vkms_config_crtc *crtc_cfg; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); plane_cfg = get_first_plane(config); @@ -493,7 +506,7 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test) struct vkms_config_crtc *crtc_cfg; int n; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Invalid: No CRTCs */ @@ -516,7 +529,7 @@ static void vkms_config_test_invalid_encoder_number(struct kunit *test) struct vkms_config_encoder *encoder_cfg; int n; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Invalid: No encoders */ @@ -541,7 +554,7 @@ static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test) struct vkms_config_encoder *encoder_cfg; int err; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); crtc_cfg1 = get_first_crtc(config); @@ -587,7 +600,7 @@ static void vkms_config_test_invalid_connector_number(struct kunit *test) struct vkms_config_connector *connector_cfg; int n; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Invalid: No connectors */ @@ -610,7 +623,7 @@ static void vkms_config_test_valid_connector_possible_encoders(struct kunit *tes struct vkms_config_encoder *encoder_cfg; struct vkms_config_connector *connector_cfg; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); encoder_cfg = get_first_encoder(config); @@ -957,6 +970,29 @@ static void vkms_config_test_connector_get_possible_encoders(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_connector_status(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_connector *connector_cfg; + enum drm_connector_status status; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + connector_cfg = vkms_config_create_connector(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg); + + status = vkms_config_connector_get_status(connector_cfg); + KUNIT_EXPECT_EQ(test, status, connector_status_connected); + + 
vkms_config_connector_set_status(connector_cfg, + connector_status_disconnected); + status = vkms_config_connector_get_status(connector_cfg); + KUNIT_EXPECT_EQ(test, status, connector_status_disconnected); + + vkms_config_destroy(config); +} + static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_empty_config), KUNIT_CASE_PARAM(vkms_config_test_default_config, @@ -978,6 +1014,7 @@ static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs), KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs), KUNIT_CASE(vkms_config_test_connector_get_possible_encoders), + KUNIT_CASE(vkms_config_test_connector_status), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_colorop.c b/drivers/gpu/drm/vkms/vkms_colorop.c new file mode 100644 index 000000000000..5c3ffc78aea0 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_colorop.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/slab.h> +#include <drm/drm_colorop.h> +#include <drm/drm_print.h> +#include <drm/drm_property.h> +#include <drm/drm_plane.h> + +#include "vkms_drv.h" + +static const u64 supported_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF); + +#define MAX_COLOR_PIPELINE_OPS 4 + +static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) +{ + struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; + struct drm_device *dev = plane->dev; + int ret; + int i = 0, j = 0; + + memset(ops, 0, sizeof(ops)); + + /* 1st op: 1d curve */ + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); + if (!ops[i]) { + drm_err(dev, "KMS: Failed to allocate colorop\n"); + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + list->type = ops[i]->base.id; + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); + + i++; + + /* 2nd op: 3x4 matrix */ + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); + if (!ops[i]) { + drm_err(dev, "KMS: Failed to allocate colorop\n"); + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + i++; + + /* 3rd op: 3x4 matrix */ + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); + if (!ops[i]) { + drm_err(dev, "KMS: Failed to allocate colorop\n"); + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + i++; + + /* 4th op: 1d curve */ + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); + if (!ops[i]) { + drm_err(dev, "KMS: Failed to allocate colorop\n"); + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + return 0; + +cleanup: + for (j = 0; j < i; j++) { + if (ops[j]) { + drm_colorop_cleanup(ops[j]); + kfree(ops[j]); + } + } + + return ret; +} + +int vkms_initialize_colorops(struct drm_plane *plane) +{ + struct drm_prop_enum_list pipeline; + int ret; + + /* Add color pipeline */ + ret = vkms_initialize_color_pipeline(plane, &pipeline); + if (ret) + return ret; + + /* Create COLOR_PIPELINE property and attach */ + ret = 
drm_plane_create_color_pipeline_property(plane, &pipeline, 1); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index fa269d279e25..3cf3f26e0d8e 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -8,10 +8,13 @@ #include <drm/drm_fourcc.h> #include <drm/drm_fixed.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <linux/minmax.h> +#include <kunit/visibility.h> -#include "vkms_drv.h" +#include "vkms_composer.h" +#include "vkms_luts.h" static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha) { @@ -60,7 +63,7 @@ static void fill_background(const struct pixel_argb_u16 *background_color, } // lerp(a, b, t) = a + (b - a) * t -static u16 lerp_u16(u16 a, u16 b, s64 t) +VISIBLE_IF_KUNIT u16 lerp_u16(u16 a, u16 b, s64 t) { s64 a_fp = drm_int2fixp(a); s64 b_fp = drm_int2fixp(b); @@ -69,27 +72,18 @@ static u16 lerp_u16(u16 a, u16 b, s64 t) return drm_fixp2int_round(a_fp + delta); } +EXPORT_SYMBOL_IF_KUNIT(lerp_u16); -static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value) +VISIBLE_IF_KUNIT s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value) { s64 color_channel_fp = drm_int2fixp(channel_value); return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio); } +EXPORT_SYMBOL_IF_KUNIT(get_lut_index); -/* - * This enum is related to the positions of the variables inside - * `struct drm_color_lut`, so the order of both needs to be the same. - */ -enum lut_channel { - LUT_RED = 0, - LUT_GREEN, - LUT_BLUE, - LUT_RESERVED -}; - -static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, - enum lut_channel channel) +VISIBLE_IF_KUNIT u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, + enum lut_channel channel) { s64 lut_index = get_lut_index(lut, channel_value); u16 *floor_lut_value, *ceil_lut_value; @@ -114,6 +108,8 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan return lerp_u16(floor_channel_value, ceil_channel_value, lut_index & DRM_FIXED_DECIMAL_MASK); } +EXPORT_SYMBOL_IF_KUNIT(apply_lut_to_channel_value); + static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer) { @@ -132,6 +128,112 @@ static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buff } } +VISIBLE_IF_KUNIT void apply_3x4_matrix(struct pixel_argb_s32 *pixel, + const struct drm_color_ctm_3x4 *matrix) +{ + s64 rf, gf, bf; + s64 r, g, b; + + r = drm_int2fixp(pixel->r); + g = drm_int2fixp(pixel->g); + b = drm_int2fixp(pixel->b); + + rf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[0]), r) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[1]), g) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[2]), b) + + drm_sm2fixp(matrix->matrix[3]); + + gf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[4]), r) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[5]), g) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[6]), b) + + drm_sm2fixp(matrix->matrix[7]); + + bf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[8]), r) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[9]), g) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[10]), b) + + drm_sm2fixp(matrix->matrix[11]); + + pixel->r = drm_fixp2int_round(rf); + pixel->g = drm_fixp2int_round(gf); + pixel->b = drm_fixp2int_round(bf); +} +EXPORT_SYMBOL_IF_KUNIT(apply_3x4_matrix); + +static void apply_colorop(struct pixel_argb_s32 *pixel, struct drm_colorop *colorop) 
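+/* 1D curves index the sRGB EOTF / inverse-EOTF LUTs; CTM_3X4 multiplies the pixel by the attached 3x4 matrix */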
+{ + struct drm_colorop_state *colorop_state = colorop->state; + struct drm_device *dev = colorop->dev; + + if (colorop->type == DRM_COLOROP_1D_CURVE) { + switch (colorop_state->curve_1d_type) { + case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: + pixel->r = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->r, LUT_RED); + pixel->g = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->g, LUT_GREEN); + pixel->b = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->b, LUT_BLUE); + break; + case DRM_COLOROP_1D_CURVE_SRGB_EOTF: + pixel->r = apply_lut_to_channel_value(&srgb_eotf, pixel->r, LUT_RED); + pixel->g = apply_lut_to_channel_value(&srgb_eotf, pixel->g, LUT_GREEN); + pixel->b = apply_lut_to_channel_value(&srgb_eotf, pixel->b, LUT_BLUE); + break; + default: + drm_WARN_ONCE(dev, true, + "unknown colorop 1D curve type %d\n", + colorop_state->curve_1d_type); + break; + } + } else if (colorop->type == DRM_COLOROP_CTM_3X4) { + if (colorop_state->data) + apply_3x4_matrix(pixel, + (struct drm_color_ctm_3x4 *)colorop_state->data->data); + } +} + +static void pre_blend_color_transform(const struct vkms_plane_state *plane_state, + struct line_buffer *output_buffer) +{ + struct pixel_argb_s32 pixel; + + for (size_t x = 0; x < output_buffer->n_pixels; x++) { + struct drm_colorop *colorop = plane_state->base.base.color_pipeline; + + /* + * Some operations, such as applying a BT709 encoding matrix, + * followed by a decoding matrix, require that we preserve + * values above 1.0 and below 0.0 until the end of the pipeline. + * + * Pack the 16-bit UNORM values into s32 to give us head-room to + * avoid clipping until we're at the end of the pipeline. Clip + * intentionally at the end of the pipeline before packing + * UNORM values back into u16. + */ + pixel.a = output_buffer->pixels[x].a; + pixel.r = output_buffer->pixels[x].r; + pixel.g = output_buffer->pixels[x].g; + pixel.b = output_buffer->pixels[x].b; + + while (colorop) { + struct drm_colorop_state *colorop_state; + + colorop_state = colorop->state; + + if (!colorop_state) + return; + + if (!colorop_state->bypass) + apply_colorop(&pixel, colorop); + + colorop = colorop->next; + } + + /* clamp values */ + output_buffer->pixels[x].a = clamp_val(pixel.a, 0, 0xffff); + output_buffer->pixels[x].r = clamp_val(pixel.r, 0, 0xffff); + output_buffer->pixels[x].g = clamp_val(pixel.g, 0, 0xffff); + output_buffer->pixels[x].b = clamp_val(pixel.b, 0, 0xffff); + } +} + /** * direction_for_rotation() - Get the correct reading direction for a given rotation * @@ -347,7 +449,7 @@ static void blend_line(struct vkms_plane_state *current_plane, int y, */ current_plane->pixel_read_line(current_plane, src_x_start, src_y_start, direction, pixel_count, &stage_buffer->pixels[dst_x_start]); - + pre_blend_color_transform(current_plane, stage_buffer); pre_mul_alpha_blend(stage_buffer, output_buffer, dst_x_start, pixel_count); } diff --git a/drivers/gpu/drm/vkms/vkms_composer.h b/drivers/gpu/drm/vkms/vkms_composer.h new file mode 100644 index 000000000000..04dd5646f672 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_composer.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _VKMS_COMPOSER_H_ +#define _VKMS_COMPOSER_H_ + +#include <kunit/visibility.h> +#include "vkms_drv.h" + +/* + * This enum is related to the positions of the variables inside + * `struct drm_color_lut`, so the order of both needs to be the same. 
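+ * The entries mirror the red, green, blue and reserved members of struct drm_color_lut.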
+ */ +enum lut_channel { + LUT_RED = 0, + LUT_GREEN, + LUT_BLUE, + LUT_RESERVED +}; + +#if IS_ENABLED(CONFIG_KUNIT) +u16 lerp_u16(u16 a, u16 b, s64 t); +s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value); +u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, + enum lut_channel channel); +void apply_3x4_matrix(struct pixel_argb_s32 *pixel, const struct drm_color_ctm_3x4 *matrix); +#endif + +#endif /* _VKMS_COMPOSER_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index a1df5659b0fb..8788df9edb7c 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -33,7 +33,8 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_create); struct vkms_config *vkms_config_default_create(bool enable_cursor, bool enable_writeback, - bool enable_overlay) + bool enable_overlay, + bool enable_plane_pipeline) { struct vkms_config *config; struct vkms_config_plane *plane_cfg; @@ -58,6 +59,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) goto err_alloc; + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); if (enable_overlay) { for (n = 0; n < NUM_OVERLAY_PLANES; n++) { @@ -67,6 +69,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) goto err_alloc; @@ -79,6 +82,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, goto err_alloc; vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) goto err_alloc; @@ -361,8 +365,11 @@ static int vkms_config_show(struct seq_file *m, void *data) vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) seq_puts(m, "encoder\n"); - vkms_config_for_each_connector(vkmsdev->config, connector_cfg) - seq_puts(m, "connector\n"); + vkms_config_for_each_connector(vkmsdev->config, connector_cfg) { + seq_puts(m, "connector:\n"); + seq_printf(m, "\tstatus=%d\n", + vkms_config_connector_get_status(connector_cfg)); + } return 0; } @@ -386,6 +393,7 @@ struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config) return ERR_PTR(-ENOMEM); plane_cfg->config = config; + plane_cfg->default_pipeline = false; vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC); @@ -588,6 +596,7 @@ struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *c return ERR_PTR(-ENOMEM); connector_cfg->config = config; + connector_cfg->status = connector_status_connected; xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC); list_add_tail(&connector_cfg->link, &config->connectors); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index 0118e3f99706..8f7f286a4bdd 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -7,6 +7,8 @@ #include <linux/types.h> #include <linux/xarray.h> +#include <drm/drm_connector.h> + #include "vkms_drv.h" /** @@ -47,6 +49,7 @@ struct vkms_config_plane { enum drm_plane_type type; struct xarray possible_crtcs; + bool default_pipeline; /* Internal usage */ struct vkms_plane *plane; @@ -99,6 +102,7 @@ struct vkms_config_encoder { * * 
@link: Link to the others connector in vkms_config * @config: The vkms_config this connector belongs to + * @status: Status (connected, disconnected...) of the connector * @possible_encoders: Array of encoders that can be used with this connector * @connector: Internal usage. This pointer should never be considered as valid. * It can be used to store a temporary reference to a VKMS connector @@ -109,6 +113,7 @@ struct vkms_config_connector { struct list_head link; struct vkms_config *config; + enum drm_connector_status status; struct xarray possible_encoders; /* Internal usage */ @@ -199,7 +204,8 @@ struct vkms_config *vkms_config_create(const char *dev_name); */ struct vkms_config *vkms_config_default_create(bool enable_cursor, bool enable_writeback, - bool enable_overlay); + bool enable_overlay, + bool enable_plane_pipeline); /** * vkms_config_destroy() - Free a VKMS configuration @@ -285,6 +291,30 @@ vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg, } /** + * vkms_config_plane_get_default_pipeline() - Return if the plane will + * be created with the default pipeline + * @plane_cfg: Plane to get the information from + */ +static inline bool +vkms_config_plane_get_default_pipeline(struct vkms_config_plane *plane_cfg) +{ + return plane_cfg->default_pipeline; +} + +/** + * vkms_config_plane_set_default_pipeline() - Set if the plane will + * be created with the default pipeline + * @plane_cfg: Plane to configure the pipeline + * @default_pipeline: New default pipeline value + */ +static inline void +vkms_config_plane_set_default_pipeline(struct vkms_config_plane *plane_cfg, + bool default_pipeline) +{ + plane_cfg->default_pipeline = default_pipeline; +} + +/** * vkms_config_plane_attach_crtc - Attach a plane to a CRTC * @plane_cfg: Plane to attach * @crtc_cfg: CRTC to attach @plane_cfg to @@ -434,4 +464,26 @@ int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connect void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg, struct vkms_config_encoder *encoder_cfg); +/** + * vkms_config_connector_get_status() - Return the status of the connector + * @connector_cfg: Connector to get the status from + */ +static inline enum drm_connector_status +vkms_config_connector_get_status(struct vkms_config_connector *connector_cfg) +{ + return connector_cfg->status; +} + +/** + * vkms_config_connector_set_status() - Set the status of the connector + * @connector_cfg: Connector to set the status to + * @status: New connector status + */ +static inline void +vkms_config_connector_set_status(struct vkms_config_connector *connector_cfg, + enum drm_connector_status status) +{ + connector_cfg->status = status; +} + #endif /* _VKMS_CONFIG_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_configfs.c b/drivers/gpu/drm/vkms/vkms_configfs.c new file mode 100644 index 000000000000..506666e21c91 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_configfs.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: GPL-2.0+ +#include <linux/cleanup.h> +#include <linux/configfs.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +#include "vkms_drv.h" +#include "vkms_config.h" +#include "vkms_configfs.h" +#include "vkms_connector.h" + +/* To avoid registering configfs more than once or unregistering on error */ +static bool is_configfs_registered; + +/** + * struct vkms_configfs_device - Configfs representation of a VKMS device + * + * @group: Top level configuration group that represents a VKMS device. 
+ * Initialized when a new directory is created under "/config/vkms/" + * @planes_group: Default subgroup of @group at "/config/vkms/planes" + * @crtcs_group: Default subgroup of @group at "/config/vkms/crtcs" + * @encoders_group: Default subgroup of @group at "/config/vkms/encoders" + * @connectors_group: Default subgroup of @group at "/config/vkms/connectors" + * @lock: Lock used to protect concurrent access to the configuration attributes + * @config: Protected by @lock. Configuration of the VKMS device + * @enabled: Protected by @lock. The device is created or destroyed when this + * option changes + */ +struct vkms_configfs_device { + struct config_group group; + struct config_group planes_group; + struct config_group crtcs_group; + struct config_group encoders_group; + struct config_group connectors_group; + + struct mutex lock; + struct vkms_config *config; + bool enabled; +}; + +/** + * struct vkms_configfs_plane - Configfs representation of a plane + * + * @group: Top level configuration group that represents a plane. + * Initialized when a new directory is created under "/config/vkms/planes" + * @possible_crtcs_group: Default subgroup of @group at "plane/possible_crtcs" + * @dev: The vkms_configfs_device this plane belongs to + * @config: Configuration of the VKMS plane + */ +struct vkms_configfs_plane { + struct config_group group; + struct config_group possible_crtcs_group; + struct vkms_configfs_device *dev; + struct vkms_config_plane *config; +}; + +/** + * struct vkms_configfs_crtc - Configfs representation of a CRTC + * + * @group: Top level configuration group that represents a CRTC. + * Initialized when a new directory is created under "/config/vkms/crtcs" + * @dev: The vkms_configfs_device this CRTC belongs to + * @config: Configuration of the VKMS CRTC + */ +struct vkms_configfs_crtc { + struct config_group group; + struct vkms_configfs_device *dev; + struct vkms_config_crtc *config; +}; + +/** + * struct vkms_configfs_encoder - Configfs representation of an encoder + * + * @group: Top level configuration group that represents an encoder. + * Initialized when a new directory is created under "/config/vkms/encoders" + * @possible_crtcs_group: Default subgroup of @group at "encoder/possible_crtcs" + * @dev: The vkms_configfs_device this encoder belongs to + * @config: Configuration of the VKMS encoder + */ +struct vkms_configfs_encoder { + struct config_group group; + struct config_group possible_crtcs_group; + struct vkms_configfs_device *dev; + struct vkms_config_encoder *config; +}; + +/** + * struct vkms_configfs_connector - Configfs representation of a connector + * + * @group: Top level configuration group that represents a connector. 
+ * Initialized when a new directory is created under "/config/vkms/connectors" + * @possible_encoders_group: Default subgroup of @group at + * "connector/possible_encoders" + * @dev: The vkms_configfs_device this connector belongs to + * @config: Configuration of the VKMS connector + */ +struct vkms_configfs_connector { + struct config_group group; + struct config_group possible_encoders_group; + struct vkms_configfs_device *dev; + struct vkms_config_connector *config; +}; + +#define device_item_to_vkms_configfs_device(item) \ + container_of(to_config_group((item)), struct vkms_configfs_device, \ + group) + +#define child_group_to_vkms_configfs_device(group) \ + device_item_to_vkms_configfs_device((&(group)->cg_item)->ci_parent) + +#define plane_item_to_vkms_configfs_plane(item) \ + container_of(to_config_group((item)), struct vkms_configfs_plane, group) + +#define plane_possible_crtcs_item_to_vkms_configfs_plane(item) \ + container_of(to_config_group((item)), struct vkms_configfs_plane, \ + possible_crtcs_group) + +#define crtc_item_to_vkms_configfs_crtc(item) \ + container_of(to_config_group((item)), struct vkms_configfs_crtc, group) + +#define encoder_item_to_vkms_configfs_encoder(item) \ + container_of(to_config_group((item)), struct vkms_configfs_encoder, \ + group) + +#define encoder_possible_crtcs_item_to_vkms_configfs_encoder(item) \ + container_of(to_config_group((item)), struct vkms_configfs_encoder, \ + possible_crtcs_group) + +#define connector_item_to_vkms_configfs_connector(item) \ + container_of(to_config_group((item)), struct vkms_configfs_connector, \ + group) + +#define connector_possible_encoders_item_to_vkms_configfs_connector(item) \ + container_of(to_config_group((item)), struct vkms_configfs_connector, \ + possible_encoders_group) + +static ssize_t crtc_writeback_show(struct config_item *item, char *page) +{ + struct vkms_configfs_crtc *crtc; + bool writeback; + + crtc = crtc_item_to_vkms_configfs_crtc(item); + + scoped_guard(mutex, &crtc->dev->lock) + writeback = vkms_config_crtc_get_writeback(crtc->config); + + return sprintf(page, "%d\n", writeback); +} + +static ssize_t crtc_writeback_store(struct config_item *item, const char *page, + size_t count) +{ + struct vkms_configfs_crtc *crtc; + bool writeback; + + crtc = crtc_item_to_vkms_configfs_crtc(item); + + if (kstrtobool(page, &writeback)) + return -EINVAL; + + scoped_guard(mutex, &crtc->dev->lock) { + if (crtc->dev->enabled) + return -EBUSY; + + vkms_config_crtc_set_writeback(crtc->config, writeback); + } + + return (ssize_t)count; +} + +CONFIGFS_ATTR(crtc_, writeback); + +static struct configfs_attribute *crtc_item_attrs[] = { + &crtc_attr_writeback, + NULL, +}; + +static void crtc_release(struct config_item *item) +{ + struct vkms_configfs_crtc *crtc; + struct mutex *lock; + + crtc = crtc_item_to_vkms_configfs_crtc(item); + lock = &crtc->dev->lock; + + scoped_guard(mutex, lock) { + vkms_config_destroy_crtc(crtc->dev->config, crtc->config); + kfree(crtc); + } +} + +static struct configfs_item_operations crtc_item_operations = { + .release = &crtc_release, +}; + +static const struct config_item_type crtc_item_type = { + .ct_attrs = crtc_item_attrs, + .ct_item_ops = &crtc_item_operations, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *make_crtc_group(struct config_group *group, + const char *name) +{ + struct vkms_configfs_device *dev; + struct vkms_configfs_crtc *crtc; + int ret; + + dev = child_group_to_vkms_configfs_device(group); + + scoped_guard(mutex, &dev->lock) { + if (dev->enabled) + 
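+ /* the topology is frozen while the device is enabled */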
return ERR_PTR(-EBUSY); + + crtc = kzalloc(sizeof(*crtc), GFP_KERNEL); + if (!crtc) + return ERR_PTR(-ENOMEM); + + crtc->dev = dev; + + crtc->config = vkms_config_create_crtc(dev->config); + if (IS_ERR(crtc->config)) { + ret = PTR_ERR(crtc->config); + kfree(crtc); + return ERR_PTR(ret); + } + + config_group_init_type_name(&crtc->group, name, &crtc_item_type); + } + + return &crtc->group; +} + +static struct configfs_group_operations crtcs_group_operations = { + .make_group = &make_crtc_group, +}; + +static const struct config_item_type crtc_group_type = { + .ct_group_ops = &crtcs_group_operations, + .ct_owner = THIS_MODULE, +}; + +static int plane_possible_crtcs_allow_link(struct config_item *src, + struct config_item *target) +{ + struct vkms_configfs_plane *plane; + struct vkms_configfs_crtc *crtc; + int ret; + + if (target->ci_type != &crtc_item_type) + return -EINVAL; + + plane = plane_possible_crtcs_item_to_vkms_configfs_plane(src); + crtc = crtc_item_to_vkms_configfs_crtc(target); + + scoped_guard(mutex, &plane->dev->lock) { + if (plane->dev->enabled) + return -EBUSY; + + ret = vkms_config_plane_attach_crtc(plane->config, crtc->config); + } + + return ret; +} + +static void plane_possible_crtcs_drop_link(struct config_item *src, + struct config_item *target) +{ + struct vkms_configfs_plane *plane; + struct vkms_configfs_crtc *crtc; + + plane = plane_possible_crtcs_item_to_vkms_configfs_plane(src); + crtc = crtc_item_to_vkms_configfs_crtc(target); + + scoped_guard(mutex, &plane->dev->lock) + vkms_config_plane_detach_crtc(plane->config, crtc->config); +} + +static struct configfs_item_operations plane_possible_crtcs_item_operations = { + .allow_link = plane_possible_crtcs_allow_link, + .drop_link = plane_possible_crtcs_drop_link, +}; + +static const struct config_item_type plane_possible_crtcs_group_type = { + .ct_item_ops = &plane_possible_crtcs_item_operations, + .ct_owner = THIS_MODULE, +}; + +static ssize_t plane_type_show(struct config_item *item, char *page) +{ + struct vkms_configfs_plane *plane; + enum drm_plane_type type; + + plane = plane_item_to_vkms_configfs_plane(item); + + scoped_guard(mutex, &plane->dev->lock) + type = vkms_config_plane_get_type(plane->config); + + return sprintf(page, "%u", type); +} + +static ssize_t plane_type_store(struct config_item *item, const char *page, + size_t count) +{ + struct vkms_configfs_plane *plane; + enum drm_plane_type type; + + plane = plane_item_to_vkms_configfs_plane(item); + + if (kstrtouint(page, 10, &type)) + return -EINVAL; + + if (type != DRM_PLANE_TYPE_OVERLAY && type != DRM_PLANE_TYPE_PRIMARY && + type != DRM_PLANE_TYPE_CURSOR) + return -EINVAL; + + scoped_guard(mutex, &plane->dev->lock) { + if (plane->dev->enabled) + return -EBUSY; + + vkms_config_plane_set_type(plane->config, type); + } + + return (ssize_t)count; +} + +CONFIGFS_ATTR(plane_, type); + +static struct configfs_attribute *plane_item_attrs[] = { + &plane_attr_type, + NULL, +}; + +static void plane_release(struct config_item *item) +{ + struct vkms_configfs_plane *plane; + struct mutex *lock; + + plane = plane_item_to_vkms_configfs_plane(item); + lock = &plane->dev->lock; + + scoped_guard(mutex, lock) { + vkms_config_destroy_plane(plane->config); + kfree(plane); + } +} + +static struct configfs_item_operations plane_item_operations = { + .release = &plane_release, +}; + +static const struct config_item_type plane_item_type = { + .ct_attrs = plane_item_attrs, + .ct_item_ops = &plane_item_operations, + .ct_owner = THIS_MODULE, +}; + +static struct config_group 
*make_plane_group(struct config_group *group, + const char *name) +{ + struct vkms_configfs_device *dev; + struct vkms_configfs_plane *plane; + int ret; + + dev = child_group_to_vkms_configfs_device(group); + + scoped_guard(mutex, &dev->lock) { + if (dev->enabled) + return ERR_PTR(-EBUSY); + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + plane->dev = dev; + + plane->config = vkms_config_create_plane(dev->config); + if (IS_ERR(plane->config)) { + ret = PTR_ERR(plane->config); + kfree(plane); + return ERR_PTR(ret); + } + + config_group_init_type_name(&plane->group, name, &plane_item_type); + + config_group_init_type_name(&plane->possible_crtcs_group, + "possible_crtcs", + &plane_possible_crtcs_group_type); + configfs_add_default_group(&plane->possible_crtcs_group, + &plane->group); + } + + return &plane->group; +} + +static struct configfs_group_operations planes_group_operations = { + .make_group = &make_plane_group, +}; + +static const struct config_item_type plane_group_type = { + .ct_group_ops = &planes_group_operations, + .ct_owner = THIS_MODULE, +}; + +static int encoder_possible_crtcs_allow_link(struct config_item *src, + struct config_item *target) +{ + struct vkms_configfs_encoder *encoder; + struct vkms_configfs_crtc *crtc; + int ret; + + if (target->ci_type != &crtc_item_type) + return -EINVAL; + + encoder = encoder_possible_crtcs_item_to_vkms_configfs_encoder(src); + crtc = crtc_item_to_vkms_configfs_crtc(target); + + scoped_guard(mutex, &encoder->dev->lock) { + if (encoder->dev->enabled) + return -EBUSY; + + ret = vkms_config_encoder_attach_crtc(encoder->config, crtc->config); + } + + return ret; +} + +static void encoder_possible_crtcs_drop_link(struct config_item *src, + struct config_item *target) +{ + struct vkms_configfs_encoder *encoder; + struct vkms_configfs_crtc *crtc; + + encoder = encoder_possible_crtcs_item_to_vkms_configfs_encoder(src); + crtc = crtc_item_to_vkms_configfs_crtc(target); + + scoped_guard(mutex, &encoder->dev->lock) + vkms_config_encoder_detach_crtc(encoder->config, crtc->config); +} + +static struct configfs_item_operations encoder_possible_crtcs_item_operations = { + .allow_link = encoder_possible_crtcs_allow_link, + .drop_link = encoder_possible_crtcs_drop_link, +}; + +static const struct config_item_type encoder_possible_crtcs_group_type = { + .ct_item_ops = &encoder_possible_crtcs_item_operations, + .ct_owner = THIS_MODULE, +}; + +static void encoder_release(struct config_item *item) +{ + struct vkms_configfs_encoder *encoder; + struct mutex *lock; + + encoder = encoder_item_to_vkms_configfs_encoder(item); + lock = &encoder->dev->lock; + + scoped_guard(mutex, lock) { + vkms_config_destroy_encoder(encoder->dev->config, encoder->config); + kfree(encoder); + } +} + +static struct configfs_item_operations encoder_item_operations = { + .release = &encoder_release, +}; + +static const struct config_item_type encoder_item_type = { + .ct_item_ops = &encoder_item_operations, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *make_encoder_group(struct config_group *group, + const char *name) +{ + struct vkms_configfs_device *dev; + struct vkms_configfs_encoder *encoder; + int ret; + + dev = child_group_to_vkms_configfs_device(group); + + scoped_guard(mutex, &dev->lock) { + if (dev->enabled) + return ERR_PTR(-EBUSY); + + encoder = kzalloc(sizeof(*encoder), GFP_KERNEL); + if (!encoder) + return ERR_PTR(-ENOMEM); + + encoder->dev = dev; + + encoder->config = vkms_config_create_encoder(dev->config); + if 
(IS_ERR(encoder->config)) { + ret = PTR_ERR(encoder->config); + kfree(encoder); + return ERR_PTR(ret); + } + + config_group_init_type_name(&encoder->group, name, + &encoder_item_type); + + config_group_init_type_name(&encoder->possible_crtcs_group, + "possible_crtcs", + &encoder_possible_crtcs_group_type); + configfs_add_default_group(&encoder->possible_crtcs_group, + &encoder->group); + } + + return &encoder->group; +} + +static struct configfs_group_operations encoders_group_operations = { + .make_group = &make_encoder_group, +}; + +static const struct config_item_type encoder_group_type = { + .ct_group_ops = &encoders_group_operations, + .ct_owner = THIS_MODULE, +}; + +static ssize_t connector_status_show(struct config_item *item, char *page) +{ + struct vkms_configfs_connector *connector; + enum drm_connector_status status; + + connector = connector_item_to_vkms_configfs_connector(item); + + scoped_guard(mutex, &connector->dev->lock) + status = vkms_config_connector_get_status(connector->config); + + return sprintf(page, "%u", status); +} + +static ssize_t connector_status_store(struct config_item *item, + const char *page, size_t count) +{ + struct vkms_configfs_connector *connector; + enum drm_connector_status status; + + connector = connector_item_to_vkms_configfs_connector(item); + + if (kstrtouint(page, 10, &status)) + return -EINVAL; + + if (status != connector_status_connected && + status != connector_status_disconnected && + status != connector_status_unknown) + return -EINVAL; + + scoped_guard(mutex, &connector->dev->lock) { + vkms_config_connector_set_status(connector->config, status); + + if (connector->dev->enabled) + vkms_trigger_connector_hotplug(connector->dev->config->dev); + } + + return (ssize_t)count; +} + +CONFIGFS_ATTR(connector_, status); + +static struct configfs_attribute *connector_item_attrs[] = { + &connector_attr_status, + NULL, +}; + +static void connector_release(struct config_item *item) +{ + struct vkms_configfs_connector *connector; + struct mutex *lock; + + connector = connector_item_to_vkms_configfs_connector(item); + lock = &connector->dev->lock; + + scoped_guard(mutex, lock) { + vkms_config_destroy_connector(connector->config); + kfree(connector); + } +} + +static struct configfs_item_operations connector_item_operations = { + .release = &connector_release, +}; + +static const struct config_item_type connector_item_type = { + .ct_attrs = connector_item_attrs, + .ct_item_ops = &connector_item_operations, + .ct_owner = THIS_MODULE, +}; + +static int connector_possible_encoders_allow_link(struct config_item *src, + struct config_item *target) +{ + struct vkms_configfs_connector *connector; + struct vkms_configfs_encoder *encoder; + int ret; + + if (target->ci_type != &encoder_item_type) + return -EINVAL; + + connector = connector_possible_encoders_item_to_vkms_configfs_connector(src); + encoder = encoder_item_to_vkms_configfs_encoder(target); + + scoped_guard(mutex, &connector->dev->lock) { + if (connector->dev->enabled) + return -EBUSY; + + ret = vkms_config_connector_attach_encoder(connector->config, + encoder->config); + } + + return ret; +} + +static void connector_possible_encoders_drop_link(struct config_item *src, + struct config_item *target) +{ + struct vkms_configfs_connector *connector; + struct vkms_configfs_encoder *encoder; + + connector = connector_possible_encoders_item_to_vkms_configfs_connector(src); + encoder = encoder_item_to_vkms_configfs_encoder(target); + + scoped_guard(mutex, &connector->dev->lock) { + 
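+ /* dropping the symlink detaches the encoder from this connector's possible_encoders */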
vkms_config_connector_detach_encoder(connector->config, + encoder->config); + } +} + +static struct configfs_item_operations connector_possible_encoders_item_operations = { + .allow_link = connector_possible_encoders_allow_link, + .drop_link = connector_possible_encoders_drop_link, +}; + +static const struct config_item_type connector_possible_encoders_group_type = { + .ct_item_ops = &connector_possible_encoders_item_operations, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *make_connector_group(struct config_group *group, + const char *name) +{ + struct vkms_configfs_device *dev; + struct vkms_configfs_connector *connector; + int ret; + + dev = child_group_to_vkms_configfs_device(group); + + scoped_guard(mutex, &dev->lock) { + if (dev->enabled) + return ERR_PTR(-EBUSY); + + connector = kzalloc(sizeof(*connector), GFP_KERNEL); + if (!connector) + return ERR_PTR(-ENOMEM); + + connector->dev = dev; + + connector->config = vkms_config_create_connector(dev->config); + if (IS_ERR(connector->config)) { + ret = PTR_ERR(connector->config); + kfree(connector); + return ERR_PTR(ret); + } + + config_group_init_type_name(&connector->group, name, + &connector_item_type); + + config_group_init_type_name(&connector->possible_encoders_group, + "possible_encoders", + &connector_possible_encoders_group_type); + configfs_add_default_group(&connector->possible_encoders_group, + &connector->group); + } + + return &connector->group; +} + +static struct configfs_group_operations connectors_group_operations = { + .make_group = &make_connector_group, +}; + +static const struct config_item_type connector_group_type = { + .ct_group_ops = &connectors_group_operations, + .ct_owner = THIS_MODULE, +}; + +static ssize_t device_enabled_show(struct config_item *item, char *page) +{ + struct vkms_configfs_device *dev; + bool enabled; + + dev = device_item_to_vkms_configfs_device(item); + + scoped_guard(mutex, &dev->lock) + enabled = dev->enabled; + + return sprintf(page, "%d\n", enabled); +} + +static ssize_t device_enabled_store(struct config_item *item, const char *page, + size_t count) +{ + struct vkms_configfs_device *dev; + bool enabled; + int ret = 0; + + dev = device_item_to_vkms_configfs_device(item); + + if (kstrtobool(page, &enabled)) + return -EINVAL; + + scoped_guard(mutex, &dev->lock) { + if (!dev->enabled && enabled) { + if (!vkms_config_is_valid(dev->config)) + return -EINVAL; + + ret = vkms_create(dev->config); + if (ret) + return ret; + } else if (dev->enabled && !enabled) { + vkms_destroy(dev->config); + } + + dev->enabled = enabled; + } + + return (ssize_t)count; +} + +CONFIGFS_ATTR(device_, enabled); + +static struct configfs_attribute *device_item_attrs[] = { + &device_attr_enabled, + NULL, +}; + +static void device_release(struct config_item *item) +{ + struct vkms_configfs_device *dev; + + dev = device_item_to_vkms_configfs_device(item); + + if (dev->enabled) + vkms_destroy(dev->config); + + mutex_destroy(&dev->lock); + vkms_config_destroy(dev->config); + kfree(dev); +} + +static struct configfs_item_operations device_item_operations = { + .release = &device_release, +}; + +static const struct config_item_type device_item_type = { + .ct_attrs = device_item_attrs, + .ct_item_ops = &device_item_operations, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *make_device_group(struct config_group *group, + const char *name) +{ + struct vkms_configfs_device *dev; + int ret; + + if (strcmp(name, DEFAULT_DEVICE_NAME) == 0) + return ERR_PTR(-EINVAL); + + dev = kzalloc(sizeof(*dev), 
GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + dev->config = vkms_config_create(name); + if (IS_ERR(dev->config)) { + ret = PTR_ERR(dev->config); + kfree(dev); + return ERR_PTR(ret); + } + + config_group_init_type_name(&dev->group, name, &device_item_type); + mutex_init(&dev->lock); + + config_group_init_type_name(&dev->planes_group, "planes", + &plane_group_type); + configfs_add_default_group(&dev->planes_group, &dev->group); + + config_group_init_type_name(&dev->crtcs_group, "crtcs", + &crtc_group_type); + configfs_add_default_group(&dev->crtcs_group, &dev->group); + + config_group_init_type_name(&dev->encoders_group, "encoders", + &encoder_group_type); + configfs_add_default_group(&dev->encoders_group, &dev->group); + + config_group_init_type_name(&dev->connectors_group, "connectors", + &connector_group_type); + configfs_add_default_group(&dev->connectors_group, &dev->group); + + return &dev->group; +} + +static struct configfs_group_operations device_group_ops = { + .make_group = &make_device_group, +}; + +static const struct config_item_type device_group_type = { + .ct_group_ops = &device_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem vkms_subsys = { + .su_group = { + .cg_item = { + .ci_name = "vkms", + .ci_type = &device_group_type, + }, + }, + .su_mutex = __MUTEX_INITIALIZER(vkms_subsys.su_mutex), +}; + +int vkms_configfs_register(void) +{ + int ret; + + if (is_configfs_registered) + return 0; + + config_group_init(&vkms_subsys.su_group); + ret = configfs_register_subsystem(&vkms_subsys); + + is_configfs_registered = ret == 0; + + return ret; +} + +void vkms_configfs_unregister(void) +{ + if (is_configfs_registered) + configfs_unregister_subsystem(&vkms_subsys); +} diff --git a/drivers/gpu/drm/vkms/vkms_configfs.h b/drivers/gpu/drm/vkms/vkms_configfs.h new file mode 100644 index 000000000000..e9020b0043db --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_configfs.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _VKMS_CONFIGFS_H_ +#define _VKMS_CONFIGFS_H_ + +int vkms_configfs_register(void); +void vkms_configfs_unregister(void); + +#endif /* _VKMS_CONFIGFS_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c index 48b10cba322a..b0a6b212d3f4 100644 --- a/drivers/gpu/drm/vkms/vkms_connector.c +++ b/drivers/gpu/drm/vkms/vkms_connector.c @@ -5,9 +5,37 @@ #include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> +#include "vkms_config.h" #include "vkms_connector.h" +static enum drm_connector_status vkms_connector_detect(struct drm_connector *connector, + bool force) +{ + struct drm_device *dev = connector->dev; + struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); + struct vkms_connector *vkms_connector; + enum drm_connector_status status; + struct vkms_config_connector *connector_cfg; + + vkms_connector = drm_connector_to_vkms_connector(connector); + + /* + * The connector configuration might not exist if its configfs directory + * was deleted. Therefore, use the configuration if present or keep the + * current status if we can not access it anymore. 
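+ * No real detection is performed and the "force" argument is ignored:
+ * the reported status comes solely from the vkms_config.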
+ */ + status = connector->status; + + vkms_config_for_each_connector(vkmsdev->config, connector_cfg) { + if (connector_cfg->connector == vkms_connector) + status = vkms_config_connector_get_status(connector_cfg); + } + + return status; +} + static const struct drm_connector_funcs vkms_connector_funcs = { + .detect = vkms_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -59,3 +87,10 @@ struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev) return connector; } + +void vkms_trigger_connector_hotplug(struct vkms_device *vkmsdev) +{ + struct drm_device *dev = &vkmsdev->drm; + + drm_kms_helper_hotplug_event(dev); +} diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h index c9149c1b7af0..ed312f4eff3a 100644 --- a/drivers/gpu/drm/vkms/vkms_connector.h +++ b/drivers/gpu/drm/vkms/vkms_connector.h @@ -5,6 +5,9 @@ #include "vkms_drv.h" +#define drm_connector_to_vkms_connector(target) \ + container_of(target, struct vkms_connector, base) + /** * struct vkms_connector - VKMS custom type wrapping around the DRM connector * @@ -23,4 +26,10 @@ struct vkms_connector { */ struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev); +/** + * vkms_trigger_connector_hotplug() - Update the device's connectors status + * @vkmsdev: VKMS device to update + */ +void vkms_trigger_connector_hotplug(struct vkms_device *vkmsdev); + #endif /* _VKMS_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index e60573e0f3e9..9a7db1d51022 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -5,27 +5,21 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #include "vkms_drv.h" -static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) +static bool vkms_crtc_handle_vblank_timeout(struct drm_crtc *crtc) { - struct vkms_output *output = container_of(timer, struct vkms_output, - vblank_hrtimer); - struct drm_crtc *crtc = &output->crtc; + struct vkms_output *output = drm_crtc_to_vkms_output(crtc); struct vkms_crtc_state *state; - u64 ret_overrun; bool ret, fence_cookie; fence_cookie = dma_fence_begin_signalling(); - ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, - output->period_ns); - if (ret_overrun != 1) - pr_warn("%s: vblank timer overrun\n", __func__); - spin_lock(&output->lock); ret = drm_crtc_handle_vblank(crtc); if (!ret) @@ -57,55 +51,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) dma_fence_end_signalling(fence_cookie); - return HRTIMER_RESTART; -} - -static int vkms_enable_vblank(struct drm_crtc *crtc) -{ - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); - struct vkms_output *out = drm_crtc_to_vkms_output(crtc); - - hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - out->period_ns = ktime_set(0, vblank->framedur_ns); - hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); - - return 0; -} - -static void vkms_disable_vblank(struct drm_crtc *crtc) -{ - struct vkms_output *out = drm_crtc_to_vkms_output(crtc); - - hrtimer_cancel(&out->vblank_hrtimer); -} - -static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc, - int *max_error, ktime_t 
*vblank_time, - bool in_vblank_irq) -{ - struct vkms_output *output = drm_crtc_to_vkms_output(crtc); - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); - - if (!READ_ONCE(vblank->enabled)) { - *vblank_time = ktime_get(); - return true; - } - - *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); - - if (WARN_ON(*vblank_time == vblank->time)) - return true; - - /* - * To prevent races we roll the hrtimer forward before we do any - * interrupt processing - this is how real hw works (the interrupt is - * only generated after all the vblank registers are updated) and what - * the vblank core expects. Therefore we need to always correct the - * timestampe by one frame. - */ - *vblank_time -= output->period_ns; - return true; } @@ -159,9 +104,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = { .reset = vkms_atomic_crtc_reset, .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state, .atomic_destroy_state = vkms_atomic_crtc_destroy_state, - .enable_vblank = vkms_enable_vblank, - .disable_vblank = vkms_disable_vblank, - .get_vblank_timestamp = vkms_get_vblank_timestamp, + DRM_CRTC_VBLANK_TIMER_FUNCS, .get_crc_sources = vkms_get_crc_sources, .set_crc_source = vkms_set_crc_source, .verify_crc_source = vkms_verify_crc_source, @@ -185,7 +128,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, return ret; drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { - plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); + plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane); WARN_ON(!plane_state); if (!plane_state->visible) @@ -201,7 +144,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, i = 0; drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { - plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); + plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane); if (!plane_state->visible) continue; @@ -213,18 +156,6 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, return 0; } -static void vkms_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - drm_crtc_vblank_on(crtc); -} - -static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - drm_crtc_vblank_off(crtc); -} - static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) __acquires(&vkms_output->lock) @@ -265,8 +196,9 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = { .atomic_check = vkms_crtc_atomic_check, .atomic_begin = vkms_crtc_atomic_begin, .atomic_flush = vkms_crtc_atomic_flush, - .atomic_enable = vkms_crtc_atomic_enable, - .atomic_disable = vkms_crtc_atomic_disable, + .atomic_enable = drm_crtc_vblank_atomic_enable, + .atomic_disable = drm_crtc_vblank_atomic_disable, + .handle_vblank_timeout = vkms_crtc_handle_vblank_timeout, }; struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary, diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index e8472d9b6e3b..dd1402f43773 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -23,11 +23,13 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_vblank.h> #include "vkms_config.h" +#include "vkms_configfs.h" #include "vkms_drv.h" #define DRIVER_NAME "vkms" @@ -49,6 +51,14 
@@ static bool enable_overlay; module_param_named(enable_overlay, enable_overlay, bool, 0444); MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support"); +static bool enable_plane_pipeline; +module_param_named(enable_plane_pipeline, enable_plane_pipeline, bool, 0444); +MODULE_PARM_DESC(enable_plane_pipeline, "Enable/Disable plane pipeline support"); + +static bool create_default_dev = true; +module_param_named(create_default_dev, create_default_dev, bool, 0444); +MODULE_PARM_DESC(create_default_dev, "Create or not the default VKMS device"); + DEFINE_DRM_GEM_FOPS(vkms_driver_fops); static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state) @@ -146,7 +156,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev) return vkms_output_init(vkmsdev); } -static int vkms_create(struct vkms_config *config) +int vkms_create(struct vkms_config *config) { int ret; struct faux_device *fdev; @@ -214,7 +224,15 @@ static int __init vkms_init(void) int ret; struct vkms_config *config; - config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay); + ret = vkms_configfs_register(); + if (ret) + return ret; + + if (!create_default_dev) + return 0; + + config = vkms_config_default_create(enable_cursor, enable_writeback, + enable_overlay, enable_plane_pipeline); if (IS_ERR(config)) return PTR_ERR(config); @@ -229,7 +247,7 @@ static int __init vkms_init(void) return 0; } -static void vkms_destroy(struct vkms_config *config) +void vkms_destroy(struct vkms_config *config) { struct faux_device *fdev; @@ -240,6 +258,7 @@ static void vkms_destroy(struct vkms_config *config) fdev = config->dev->faux_dev; + drm_colorop_pipeline_destroy(&config->dev->drm); drm_dev_unregister(&config->dev->drm); drm_atomic_helper_shutdown(&config->dev->drm); devres_release_group(&fdev->dev, NULL); @@ -250,6 +269,8 @@ static void vkms_destroy(struct vkms_config *config) static void __exit vkms_exit(void) { + vkms_configfs_unregister(); + if (!default_config) return; diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 8013c31efe3b..0933e4ce0ff0 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -45,6 +45,10 @@ struct vkms_frame_info { unsigned int rotation; }; +struct pixel_argb_s32 { + s32 a, r, g, b; +}; + /** * struct pixel_argb_u16 - Internal representation of a pixel color. * @a: Alpha component value, stored in 16 bits, without padding, using @@ -215,8 +219,6 @@ struct vkms_output { struct drm_crtc crtc; struct drm_writeback_connector wb_connector; struct drm_encoder wb_encoder; - struct hrtimer vblank_hrtimer; - ktime_t period_ns; struct workqueue_struct *composer_workq; spinlock_t lock; @@ -227,6 +229,7 @@ struct vkms_output { }; struct vkms_config; +struct vkms_config_plane; /** * struct vkms_device - Description of a VKMS device @@ -259,6 +262,26 @@ struct vkms_device { container_of(target, struct vkms_plane_state, base.base) /** + * vkms_create() - Create a device from a configuration + * @config: Config used to configure the new device + * + * A pointer to the created vkms_device is stored in @config + * + * Returns: + * 0 on success or an error. + */ +int vkms_create(struct vkms_config *config); + +/** + * vkms_destroy() - Destroy a device + * @config: Config from which the device was created + * + * The device is completely removed, but the @config is not freed. It can be + * reused or destroyed with vkms_config_destroy(). 
+ */ +void vkms_destroy(struct vkms_config *config); + +/** * vkms_crtc_init() - Initialize a CRTC for VKMS * @dev: DRM device associated with the VKMS buffer * @crtc: uninitialized CRTC device @@ -280,10 +303,10 @@ int vkms_output_init(struct vkms_device *vkmsdev); * vkms_plane_init() - Initialize a plane * * @vkmsdev: VKMS device containing the plane - * @type: type of plane to initialize + * @plane_cfg: plane configuration */ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, - enum drm_plane_type type); + struct vkms_config_plane *plane_cfg); /* CRC Support */ const char *const *vkms_get_crc_sources(struct drm_crtc *crtc, @@ -300,4 +323,7 @@ void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer /* Writeback */ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev, struct vkms_output *vkms_out); +/* Colorops */ +int vkms_initialize_colorops(struct drm_plane *plane); + #endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_luts.c b/drivers/gpu/drm/vkms/vkms_luts.c new file mode 100644 index 000000000000..82cb792f10d8 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_luts.c @@ -0,0 +1,811 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <drm/drm_mode.h> + +#include "vkms_drv.h" +#include "vkms_luts.h" + +/* + * These luts were generated with a LUT generated based on + * skia's transfer function code. The LUT generator can be + * found at + * https://gitlab.freedesktop.org/hwentland/lutgen + */ + +static struct drm_color_lut linear_array[LUT_SIZE] = { + { 0x0, 0x0, 0x0, 0 }, + { 0x101, 0x101, 0x101, 0 }, + { 0x202, 0x202, 0x202, 0 }, + { 0x303, 0x303, 0x303, 0 }, + { 0x404, 0x404, 0x404, 0 }, + { 0x505, 0x505, 0x505, 0 }, + { 0x606, 0x606, 0x606, 0 }, + { 0x707, 0x707, 0x707, 0 }, + { 0x808, 0x808, 0x808, 0 }, + { 0x909, 0x909, 0x909, 0 }, + { 0xa0a, 0xa0a, 0xa0a, 0 }, + { 0xb0b, 0xb0b, 0xb0b, 0 }, + { 0xc0c, 0xc0c, 0xc0c, 0 }, + { 0xd0d, 0xd0d, 0xd0d, 0 }, + { 0xe0e, 0xe0e, 0xe0e, 0 }, + { 0xf0f, 0xf0f, 0xf0f, 0 }, + { 0x1010, 0x1010, 0x1010, 0 }, + { 0x1111, 0x1111, 0x1111, 0 }, + { 0x1212, 0x1212, 0x1212, 0 }, + { 0x1313, 0x1313, 0x1313, 0 }, + { 0x1414, 0x1414, 0x1414, 0 }, + { 0x1515, 0x1515, 0x1515, 0 }, + { 0x1616, 0x1616, 0x1616, 0 }, + { 0x1717, 0x1717, 0x1717, 0 }, + { 0x1818, 0x1818, 0x1818, 0 }, + { 0x1919, 0x1919, 0x1919, 0 }, + { 0x1a1a, 0x1a1a, 0x1a1a, 0 }, + { 0x1b1b, 0x1b1b, 0x1b1b, 0 }, + { 0x1c1c, 0x1c1c, 0x1c1c, 0 }, + { 0x1d1d, 0x1d1d, 0x1d1d, 0 }, + { 0x1e1e, 0x1e1e, 0x1e1e, 0 }, + { 0x1f1f, 0x1f1f, 0x1f1f, 0 }, + { 0x2020, 0x2020, 0x2020, 0 }, + { 0x2121, 0x2121, 0x2121, 0 }, + { 0x2222, 0x2222, 0x2222, 0 }, + { 0x2323, 0x2323, 0x2323, 0 }, + { 0x2424, 0x2424, 0x2424, 0 }, + { 0x2525, 0x2525, 0x2525, 0 }, + { 0x2626, 0x2626, 0x2626, 0 }, + { 0x2727, 0x2727, 0x2727, 0 }, + { 0x2828, 0x2828, 0x2828, 0 }, + { 0x2929, 0x2929, 0x2929, 0 }, + { 0x2a2a, 0x2a2a, 0x2a2a, 0 }, + { 0x2b2b, 0x2b2b, 0x2b2b, 0 }, + { 0x2c2c, 0x2c2c, 0x2c2c, 0 }, + { 0x2d2d, 0x2d2d, 0x2d2d, 0 }, + { 0x2e2e, 0x2e2e, 0x2e2e, 0 }, + { 0x2f2f, 0x2f2f, 0x2f2f, 0 }, + { 0x3030, 0x3030, 0x3030, 0 }, + { 0x3131, 0x3131, 0x3131, 0 }, + { 0x3232, 0x3232, 0x3232, 0 }, + { 0x3333, 0x3333, 0x3333, 0 }, + { 0x3434, 0x3434, 0x3434, 0 }, + { 0x3535, 0x3535, 0x3535, 0 }, + { 0x3636, 0x3636, 0x3636, 0 }, + { 0x3737, 0x3737, 0x3737, 0 }, + { 0x3838, 0x3838, 0x3838, 0 }, + { 0x3939, 0x3939, 0x3939, 0 }, + { 0x3a3a, 0x3a3a, 0x3a3a, 0 }, + { 0x3b3b, 0x3b3b, 0x3b3b, 0 }, + { 0x3c3c, 0x3c3c, 0x3c3c, 0 }, + { 0x3d3d, 0x3d3d, 0x3d3d, 0 }, + { 0x3e3e, 0x3e3e, 0x3e3e, 
0 }, + { 0x3f3f, 0x3f3f, 0x3f3f, 0 }, + { 0x4040, 0x4040, 0x4040, 0 }, + { 0x4141, 0x4141, 0x4141, 0 }, + { 0x4242, 0x4242, 0x4242, 0 }, + { 0x4343, 0x4343, 0x4343, 0 }, + { 0x4444, 0x4444, 0x4444, 0 }, + { 0x4545, 0x4545, 0x4545, 0 }, + { 0x4646, 0x4646, 0x4646, 0 }, + { 0x4747, 0x4747, 0x4747, 0 }, + { 0x4848, 0x4848, 0x4848, 0 }, + { 0x4949, 0x4949, 0x4949, 0 }, + { 0x4a4a, 0x4a4a, 0x4a4a, 0 }, + { 0x4b4b, 0x4b4b, 0x4b4b, 0 }, + { 0x4c4c, 0x4c4c, 0x4c4c, 0 }, + { 0x4d4d, 0x4d4d, 0x4d4d, 0 }, + { 0x4e4e, 0x4e4e, 0x4e4e, 0 }, + { 0x4f4f, 0x4f4f, 0x4f4f, 0 }, + { 0x5050, 0x5050, 0x5050, 0 }, + { 0x5151, 0x5151, 0x5151, 0 }, + { 0x5252, 0x5252, 0x5252, 0 }, + { 0x5353, 0x5353, 0x5353, 0 }, + { 0x5454, 0x5454, 0x5454, 0 }, + { 0x5555, 0x5555, 0x5555, 0 }, + { 0x5656, 0x5656, 0x5656, 0 }, + { 0x5757, 0x5757, 0x5757, 0 }, + { 0x5858, 0x5858, 0x5858, 0 }, + { 0x5959, 0x5959, 0x5959, 0 }, + { 0x5a5a, 0x5a5a, 0x5a5a, 0 }, + { 0x5b5b, 0x5b5b, 0x5b5b, 0 }, + { 0x5c5c, 0x5c5c, 0x5c5c, 0 }, + { 0x5d5d, 0x5d5d, 0x5d5d, 0 }, + { 0x5e5e, 0x5e5e, 0x5e5e, 0 }, + { 0x5f5f, 0x5f5f, 0x5f5f, 0 }, + { 0x6060, 0x6060, 0x6060, 0 }, + { 0x6161, 0x6161, 0x6161, 0 }, + { 0x6262, 0x6262, 0x6262, 0 }, + { 0x6363, 0x6363, 0x6363, 0 }, + { 0x6464, 0x6464, 0x6464, 0 }, + { 0x6565, 0x6565, 0x6565, 0 }, + { 0x6666, 0x6666, 0x6666, 0 }, + { 0x6767, 0x6767, 0x6767, 0 }, + { 0x6868, 0x6868, 0x6868, 0 }, + { 0x6969, 0x6969, 0x6969, 0 }, + { 0x6a6a, 0x6a6a, 0x6a6a, 0 }, + { 0x6b6b, 0x6b6b, 0x6b6b, 0 }, + { 0x6c6c, 0x6c6c, 0x6c6c, 0 }, + { 0x6d6d, 0x6d6d, 0x6d6d, 0 }, + { 0x6e6e, 0x6e6e, 0x6e6e, 0 }, + { 0x6f6f, 0x6f6f, 0x6f6f, 0 }, + { 0x7070, 0x7070, 0x7070, 0 }, + { 0x7171, 0x7171, 0x7171, 0 }, + { 0x7272, 0x7272, 0x7272, 0 }, + { 0x7373, 0x7373, 0x7373, 0 }, + { 0x7474, 0x7474, 0x7474, 0 }, + { 0x7575, 0x7575, 0x7575, 0 }, + { 0x7676, 0x7676, 0x7676, 0 }, + { 0x7777, 0x7777, 0x7777, 0 }, + { 0x7878, 0x7878, 0x7878, 0 }, + { 0x7979, 0x7979, 0x7979, 0 }, + { 0x7a7a, 0x7a7a, 0x7a7a, 0 }, + { 0x7b7b, 0x7b7b, 0x7b7b, 0 }, + { 0x7c7c, 0x7c7c, 0x7c7c, 0 }, + { 0x7d7d, 0x7d7d, 0x7d7d, 0 }, + { 0x7e7e, 0x7e7e, 0x7e7e, 0 }, + { 0x7f7f, 0x7f7f, 0x7f7f, 0 }, + { 0x8080, 0x8080, 0x8080, 0 }, + { 0x8181, 0x8181, 0x8181, 0 }, + { 0x8282, 0x8282, 0x8282, 0 }, + { 0x8383, 0x8383, 0x8383, 0 }, + { 0x8484, 0x8484, 0x8484, 0 }, + { 0x8585, 0x8585, 0x8585, 0 }, + { 0x8686, 0x8686, 0x8686, 0 }, + { 0x8787, 0x8787, 0x8787, 0 }, + { 0x8888, 0x8888, 0x8888, 0 }, + { 0x8989, 0x8989, 0x8989, 0 }, + { 0x8a8a, 0x8a8a, 0x8a8a, 0 }, + { 0x8b8b, 0x8b8b, 0x8b8b, 0 }, + { 0x8c8c, 0x8c8c, 0x8c8c, 0 }, + { 0x8d8d, 0x8d8d, 0x8d8d, 0 }, + { 0x8e8e, 0x8e8e, 0x8e8e, 0 }, + { 0x8f8f, 0x8f8f, 0x8f8f, 0 }, + { 0x9090, 0x9090, 0x9090, 0 }, + { 0x9191, 0x9191, 0x9191, 0 }, + { 0x9292, 0x9292, 0x9292, 0 }, + { 0x9393, 0x9393, 0x9393, 0 }, + { 0x9494, 0x9494, 0x9494, 0 }, + { 0x9595, 0x9595, 0x9595, 0 }, + { 0x9696, 0x9696, 0x9696, 0 }, + { 0x9797, 0x9797, 0x9797, 0 }, + { 0x9898, 0x9898, 0x9898, 0 }, + { 0x9999, 0x9999, 0x9999, 0 }, + { 0x9a9a, 0x9a9a, 0x9a9a, 0 }, + { 0x9b9b, 0x9b9b, 0x9b9b, 0 }, + { 0x9c9c, 0x9c9c, 0x9c9c, 0 }, + { 0x9d9d, 0x9d9d, 0x9d9d, 0 }, + { 0x9e9e, 0x9e9e, 0x9e9e, 0 }, + { 0x9f9f, 0x9f9f, 0x9f9f, 0 }, + { 0xa0a0, 0xa0a0, 0xa0a0, 0 }, + { 0xa1a1, 0xa1a1, 0xa1a1, 0 }, + { 0xa2a2, 0xa2a2, 0xa2a2, 0 }, + { 0xa3a3, 0xa3a3, 0xa3a3, 0 }, + { 0xa4a4, 0xa4a4, 0xa4a4, 0 }, + { 0xa5a5, 0xa5a5, 0xa5a5, 0 }, + { 0xa6a6, 0xa6a6, 0xa6a6, 0 }, + { 0xa7a7, 0xa7a7, 0xa7a7, 0 }, + { 0xa8a8, 0xa8a8, 0xa8a8, 0 }, + { 0xa9a9, 0xa9a9, 0xa9a9, 0 }, + { 0xaaaa, 
0xaaaa, 0xaaaa, 0 }, + { 0xabab, 0xabab, 0xabab, 0 }, + { 0xacac, 0xacac, 0xacac, 0 }, + { 0xadad, 0xadad, 0xadad, 0 }, + { 0xaeae, 0xaeae, 0xaeae, 0 }, + { 0xafaf, 0xafaf, 0xafaf, 0 }, + { 0xb0b0, 0xb0b0, 0xb0b0, 0 }, + { 0xb1b1, 0xb1b1, 0xb1b1, 0 }, + { 0xb2b2, 0xb2b2, 0xb2b2, 0 }, + { 0xb3b3, 0xb3b3, 0xb3b3, 0 }, + { 0xb4b4, 0xb4b4, 0xb4b4, 0 }, + { 0xb5b5, 0xb5b5, 0xb5b5, 0 }, + { 0xb6b6, 0xb6b6, 0xb6b6, 0 }, + { 0xb7b7, 0xb7b7, 0xb7b7, 0 }, + { 0xb8b8, 0xb8b8, 0xb8b8, 0 }, + { 0xb9b9, 0xb9b9, 0xb9b9, 0 }, + { 0xbaba, 0xbaba, 0xbaba, 0 }, + { 0xbbbb, 0xbbbb, 0xbbbb, 0 }, + { 0xbcbc, 0xbcbc, 0xbcbc, 0 }, + { 0xbdbd, 0xbdbd, 0xbdbd, 0 }, + { 0xbebe, 0xbebe, 0xbebe, 0 }, + { 0xbfbf, 0xbfbf, 0xbfbf, 0 }, + { 0xc0c0, 0xc0c0, 0xc0c0, 0 }, + { 0xc1c1, 0xc1c1, 0xc1c1, 0 }, + { 0xc2c2, 0xc2c2, 0xc2c2, 0 }, + { 0xc3c3, 0xc3c3, 0xc3c3, 0 }, + { 0xc4c4, 0xc4c4, 0xc4c4, 0 }, + { 0xc5c5, 0xc5c5, 0xc5c5, 0 }, + { 0xc6c6, 0xc6c6, 0xc6c6, 0 }, + { 0xc7c7, 0xc7c7, 0xc7c7, 0 }, + { 0xc8c8, 0xc8c8, 0xc8c8, 0 }, + { 0xc9c9, 0xc9c9, 0xc9c9, 0 }, + { 0xcaca, 0xcaca, 0xcaca, 0 }, + { 0xcbcb, 0xcbcb, 0xcbcb, 0 }, + { 0xcccc, 0xcccc, 0xcccc, 0 }, + { 0xcdcd, 0xcdcd, 0xcdcd, 0 }, + { 0xcece, 0xcece, 0xcece, 0 }, + { 0xcfcf, 0xcfcf, 0xcfcf, 0 }, + { 0xd0d0, 0xd0d0, 0xd0d0, 0 }, + { 0xd1d1, 0xd1d1, 0xd1d1, 0 }, + { 0xd2d2, 0xd2d2, 0xd2d2, 0 }, + { 0xd3d3, 0xd3d3, 0xd3d3, 0 }, + { 0xd4d4, 0xd4d4, 0xd4d4, 0 }, + { 0xd5d5, 0xd5d5, 0xd5d5, 0 }, + { 0xd6d6, 0xd6d6, 0xd6d6, 0 }, + { 0xd7d7, 0xd7d7, 0xd7d7, 0 }, + { 0xd8d8, 0xd8d8, 0xd8d8, 0 }, + { 0xd9d9, 0xd9d9, 0xd9d9, 0 }, + { 0xdada, 0xdada, 0xdada, 0 }, + { 0xdbdb, 0xdbdb, 0xdbdb, 0 }, + { 0xdcdc, 0xdcdc, 0xdcdc, 0 }, + { 0xdddd, 0xdddd, 0xdddd, 0 }, + { 0xdede, 0xdede, 0xdede, 0 }, + { 0xdfdf, 0xdfdf, 0xdfdf, 0 }, + { 0xe0e0, 0xe0e0, 0xe0e0, 0 }, + { 0xe1e1, 0xe1e1, 0xe1e1, 0 }, + { 0xe2e2, 0xe2e2, 0xe2e2, 0 }, + { 0xe3e3, 0xe3e3, 0xe3e3, 0 }, + { 0xe4e4, 0xe4e4, 0xe4e4, 0 }, + { 0xe5e5, 0xe5e5, 0xe5e5, 0 }, + { 0xe6e6, 0xe6e6, 0xe6e6, 0 }, + { 0xe7e7, 0xe7e7, 0xe7e7, 0 }, + { 0xe8e8, 0xe8e8, 0xe8e8, 0 }, + { 0xe9e9, 0xe9e9, 0xe9e9, 0 }, + { 0xeaea, 0xeaea, 0xeaea, 0 }, + { 0xebeb, 0xebeb, 0xebeb, 0 }, + { 0xecec, 0xecec, 0xecec, 0 }, + { 0xeded, 0xeded, 0xeded, 0 }, + { 0xeeee, 0xeeee, 0xeeee, 0 }, + { 0xefef, 0xefef, 0xefef, 0 }, + { 0xf0f0, 0xf0f0, 0xf0f0, 0 }, + { 0xf1f1, 0xf1f1, 0xf1f1, 0 }, + { 0xf2f2, 0xf2f2, 0xf2f2, 0 }, + { 0xf3f3, 0xf3f3, 0xf3f3, 0 }, + { 0xf4f4, 0xf4f4, 0xf4f4, 0 }, + { 0xf5f5, 0xf5f5, 0xf5f5, 0 }, + { 0xf6f6, 0xf6f6, 0xf6f6, 0 }, + { 0xf7f7, 0xf7f7, 0xf7f7, 0 }, + { 0xf8f8, 0xf8f8, 0xf8f8, 0 }, + { 0xf9f9, 0xf9f9, 0xf9f9, 0 }, + { 0xfafa, 0xfafa, 0xfafa, 0 }, + { 0xfbfb, 0xfbfb, 0xfbfb, 0 }, + { 0xfcfc, 0xfcfc, 0xfcfc, 0 }, + { 0xfdfd, 0xfdfd, 0xfdfd, 0 }, + { 0xfefe, 0xfefe, 0xfefe, 0 }, + { 0xffff, 0xffff, 0xffff, 0 }, +}; + +const struct vkms_color_lut linear_eotf = { + .base = linear_array, + .lut_length = LUT_SIZE, + .channel_value2index_ratio = 0xff00ffll +}; +EXPORT_SYMBOL(linear_eotf); + +static struct drm_color_lut srgb_array[LUT_SIZE] = { + { 0x0, 0x0, 0x0, 0 }, + { 0x13, 0x13, 0x13, 0 }, + { 0x27, 0x27, 0x27, 0 }, + { 0x3b, 0x3b, 0x3b, 0 }, + { 0x4f, 0x4f, 0x4f, 0 }, + { 0x63, 0x63, 0x63, 0 }, + { 0x77, 0x77, 0x77, 0 }, + { 0x8b, 0x8b, 0x8b, 0 }, + { 0x9f, 0x9f, 0x9f, 0 }, + { 0xb3, 0xb3, 0xb3, 0 }, + { 0xc6, 0xc6, 0xc6, 0 }, + { 0xdb, 0xdb, 0xdb, 0 }, + { 0xf0, 0xf0, 0xf0, 0 }, + { 0x107, 0x107, 0x107, 0 }, + { 0x11f, 0x11f, 0x11f, 0 }, + { 0x139, 0x139, 0x139, 0 }, + { 0x153, 0x153, 0x153, 0 }, + { 0x16f, 0x16f, 0x16f, 0 
}, + { 0x18c, 0x18c, 0x18c, 0 }, + { 0x1aa, 0x1aa, 0x1aa, 0 }, + { 0x1ca, 0x1ca, 0x1ca, 0 }, + { 0x1eb, 0x1eb, 0x1eb, 0 }, + { 0x20d, 0x20d, 0x20d, 0 }, + { 0x231, 0x231, 0x231, 0 }, + { 0x256, 0x256, 0x256, 0 }, + { 0x27d, 0x27d, 0x27d, 0 }, + { 0x2a4, 0x2a4, 0x2a4, 0 }, + { 0x2ce, 0x2ce, 0x2ce, 0 }, + { 0x2f9, 0x2f9, 0x2f9, 0 }, + { 0x325, 0x325, 0x325, 0 }, + { 0x352, 0x352, 0x352, 0 }, + { 0x381, 0x381, 0x381, 0 }, + { 0x3b2, 0x3b2, 0x3b2, 0 }, + { 0x3e4, 0x3e4, 0x3e4, 0 }, + { 0x418, 0x418, 0x418, 0 }, + { 0x44d, 0x44d, 0x44d, 0 }, + { 0x484, 0x484, 0x484, 0 }, + { 0x4bc, 0x4bc, 0x4bc, 0 }, + { 0x4f6, 0x4f6, 0x4f6, 0 }, + { 0x531, 0x531, 0x531, 0 }, + { 0x56e, 0x56e, 0x56e, 0 }, + { 0x5ad, 0x5ad, 0x5ad, 0 }, + { 0x5ed, 0x5ed, 0x5ed, 0 }, + { 0x62f, 0x62f, 0x62f, 0 }, + { 0x672, 0x672, 0x672, 0 }, + { 0x6b7, 0x6b7, 0x6b7, 0 }, + { 0x6fe, 0x6fe, 0x6fe, 0 }, + { 0x746, 0x746, 0x746, 0 }, + { 0x791, 0x791, 0x791, 0 }, + { 0x7dc, 0x7dc, 0x7dc, 0 }, + { 0x82a, 0x82a, 0x82a, 0 }, + { 0x879, 0x879, 0x879, 0 }, + { 0x8ca, 0x8ca, 0x8ca, 0 }, + { 0x91d, 0x91d, 0x91d, 0 }, + { 0x971, 0x971, 0x971, 0 }, + { 0x9c7, 0x9c7, 0x9c7, 0 }, + { 0xa1f, 0xa1f, 0xa1f, 0 }, + { 0xa79, 0xa79, 0xa79, 0 }, + { 0xad4, 0xad4, 0xad4, 0 }, + { 0xb32, 0xb32, 0xb32, 0 }, + { 0xb91, 0xb91, 0xb91, 0 }, + { 0xbf2, 0xbf2, 0xbf2, 0 }, + { 0xc54, 0xc54, 0xc54, 0 }, + { 0xcb9, 0xcb9, 0xcb9, 0 }, + { 0xd1f, 0xd1f, 0xd1f, 0 }, + { 0xd88, 0xd88, 0xd88, 0 }, + { 0xdf2, 0xdf2, 0xdf2, 0 }, + { 0xe5e, 0xe5e, 0xe5e, 0 }, + { 0xecc, 0xecc, 0xecc, 0 }, + { 0xf3c, 0xf3c, 0xf3c, 0 }, + { 0xfad, 0xfad, 0xfad, 0 }, + { 0x1021, 0x1021, 0x1021, 0 }, + { 0x1096, 0x1096, 0x1096, 0 }, + { 0x110e, 0x110e, 0x110e, 0 }, + { 0x1187, 0x1187, 0x1187, 0 }, + { 0x1203, 0x1203, 0x1203, 0 }, + { 0x1280, 0x1280, 0x1280, 0 }, + { 0x12ff, 0x12ff, 0x12ff, 0 }, + { 0x1380, 0x1380, 0x1380, 0 }, + { 0x1404, 0x1404, 0x1404, 0 }, + { 0x1489, 0x1489, 0x1489, 0 }, + { 0x1510, 0x1510, 0x1510, 0 }, + { 0x1599, 0x1599, 0x1599, 0 }, + { 0x1624, 0x1624, 0x1624, 0 }, + { 0x16b2, 0x16b2, 0x16b2, 0 }, + { 0x1741, 0x1741, 0x1741, 0 }, + { 0x17d2, 0x17d2, 0x17d2, 0 }, + { 0x1865, 0x1865, 0x1865, 0 }, + { 0x18fb, 0x18fb, 0x18fb, 0 }, + { 0x1992, 0x1992, 0x1992, 0 }, + { 0x1a2c, 0x1a2c, 0x1a2c, 0 }, + { 0x1ac8, 0x1ac8, 0x1ac8, 0 }, + { 0x1b65, 0x1b65, 0x1b65, 0 }, + { 0x1c05, 0x1c05, 0x1c05, 0 }, + { 0x1ca7, 0x1ca7, 0x1ca7, 0 }, + { 0x1d4b, 0x1d4b, 0x1d4b, 0 }, + { 0x1df1, 0x1df1, 0x1df1, 0 }, + { 0x1e99, 0x1e99, 0x1e99, 0 }, + { 0x1f44, 0x1f44, 0x1f44, 0 }, + { 0x1ff0, 0x1ff0, 0x1ff0, 0 }, + { 0x209f, 0x209f, 0x209f, 0 }, + { 0x2150, 0x2150, 0x2150, 0 }, + { 0x2203, 0x2203, 0x2203, 0 }, + { 0x22b8, 0x22b8, 0x22b8, 0 }, + { 0x2370, 0x2370, 0x2370, 0 }, + { 0x2429, 0x2429, 0x2429, 0 }, + { 0x24e5, 0x24e5, 0x24e5, 0 }, + { 0x25a3, 0x25a3, 0x25a3, 0 }, + { 0x2663, 0x2663, 0x2663, 0 }, + { 0x2726, 0x2726, 0x2726, 0 }, + { 0x27ea, 0x27ea, 0x27ea, 0 }, + { 0x28b1, 0x28b1, 0x28b1, 0 }, + { 0x297a, 0x297a, 0x297a, 0 }, + { 0x2a45, 0x2a45, 0x2a45, 0 }, + { 0x2b13, 0x2b13, 0x2b13, 0 }, + { 0x2be3, 0x2be3, 0x2be3, 0 }, + { 0x2cb5, 0x2cb5, 0x2cb5, 0 }, + { 0x2d89, 0x2d89, 0x2d89, 0 }, + { 0x2e60, 0x2e60, 0x2e60, 0 }, + { 0x2f39, 0x2f39, 0x2f39, 0 }, + { 0x3014, 0x3014, 0x3014, 0 }, + { 0x30f2, 0x30f2, 0x30f2, 0 }, + { 0x31d2, 0x31d2, 0x31d2, 0 }, + { 0x32b4, 0x32b4, 0x32b4, 0 }, + { 0x3398, 0x3398, 0x3398, 0 }, + { 0x347f, 0x347f, 0x347f, 0 }, + { 0x3569, 0x3569, 0x3569, 0 }, + { 0x3654, 0x3654, 0x3654, 0 }, + { 0x3742, 0x3742, 0x3742, 0 }, + { 0x3832, 0x3832, 0x3832, 0 }, + { 0x3925, 
0x3925, 0x3925, 0 }, + { 0x3a1a, 0x3a1a, 0x3a1a, 0 }, + { 0x3b11, 0x3b11, 0x3b11, 0 }, + { 0x3c0b, 0x3c0b, 0x3c0b, 0 }, + { 0x3d07, 0x3d07, 0x3d07, 0 }, + { 0x3e05, 0x3e05, 0x3e05, 0 }, + { 0x3f06, 0x3f06, 0x3f06, 0 }, + { 0x400a, 0x400a, 0x400a, 0 }, + { 0x410f, 0x410f, 0x410f, 0 }, + { 0x4218, 0x4218, 0x4218, 0 }, + { 0x4322, 0x4322, 0x4322, 0 }, + { 0x442f, 0x442f, 0x442f, 0 }, + { 0x453f, 0x453f, 0x453f, 0 }, + { 0x4650, 0x4650, 0x4650, 0 }, + { 0x4765, 0x4765, 0x4765, 0 }, + { 0x487c, 0x487c, 0x487c, 0 }, + { 0x4995, 0x4995, 0x4995, 0 }, + { 0x4ab1, 0x4ab1, 0x4ab1, 0 }, + { 0x4bcf, 0x4bcf, 0x4bcf, 0 }, + { 0x4cf0, 0x4cf0, 0x4cf0, 0 }, + { 0x4e13, 0x4e13, 0x4e13, 0 }, + { 0x4f39, 0x4f39, 0x4f39, 0 }, + { 0x5061, 0x5061, 0x5061, 0 }, + { 0x518b, 0x518b, 0x518b, 0 }, + { 0x52b9, 0x52b9, 0x52b9, 0 }, + { 0x53e8, 0x53e8, 0x53e8, 0 }, + { 0x551b, 0x551b, 0x551b, 0 }, + { 0x5650, 0x5650, 0x5650, 0 }, + { 0x5787, 0x5787, 0x5787, 0 }, + { 0x58c1, 0x58c1, 0x58c1, 0 }, + { 0x59fd, 0x59fd, 0x59fd, 0 }, + { 0x5b3c, 0x5b3c, 0x5b3c, 0 }, + { 0x5c7e, 0x5c7e, 0x5c7e, 0 }, + { 0x5dc2, 0x5dc2, 0x5dc2, 0 }, + { 0x5f09, 0x5f09, 0x5f09, 0 }, + { 0x6052, 0x6052, 0x6052, 0 }, + { 0x619e, 0x619e, 0x619e, 0 }, + { 0x62ec, 0x62ec, 0x62ec, 0 }, + { 0x643d, 0x643d, 0x643d, 0 }, + { 0x6591, 0x6591, 0x6591, 0 }, + { 0x66e7, 0x66e7, 0x66e7, 0 }, + { 0x6840, 0x6840, 0x6840, 0 }, + { 0x699b, 0x699b, 0x699b, 0 }, + { 0x6afa, 0x6afa, 0x6afa, 0 }, + { 0x6c5a, 0x6c5a, 0x6c5a, 0 }, + { 0x6dbe, 0x6dbe, 0x6dbe, 0 }, + { 0x6f24, 0x6f24, 0x6f24, 0 }, + { 0x708c, 0x708c, 0x708c, 0 }, + { 0x71f8, 0x71f8, 0x71f8, 0 }, + { 0x7366, 0x7366, 0x7366, 0 }, + { 0x74d6, 0x74d6, 0x74d6, 0 }, + { 0x764a, 0x764a, 0x764a, 0 }, + { 0x77c0, 0x77c0, 0x77c0, 0 }, + { 0x7938, 0x7938, 0x7938, 0 }, + { 0x7ab4, 0x7ab4, 0x7ab4, 0 }, + { 0x7c32, 0x7c32, 0x7c32, 0 }, + { 0x7db3, 0x7db3, 0x7db3, 0 }, + { 0x7f36, 0x7f36, 0x7f36, 0 }, + { 0x80bc, 0x80bc, 0x80bc, 0 }, + { 0x8245, 0x8245, 0x8245, 0 }, + { 0x83d1, 0x83d1, 0x83d1, 0 }, + { 0x855f, 0x855f, 0x855f, 0 }, + { 0x86f0, 0x86f0, 0x86f0, 0 }, + { 0x8884, 0x8884, 0x8884, 0 }, + { 0x8a1a, 0x8a1a, 0x8a1a, 0 }, + { 0x8bb4, 0x8bb4, 0x8bb4, 0 }, + { 0x8d50, 0x8d50, 0x8d50, 0 }, + { 0x8eee, 0x8eee, 0x8eee, 0 }, + { 0x9090, 0x9090, 0x9090, 0 }, + { 0x9234, 0x9234, 0x9234, 0 }, + { 0x93db, 0x93db, 0x93db, 0 }, + { 0x9585, 0x9585, 0x9585, 0 }, + { 0x9732, 0x9732, 0x9732, 0 }, + { 0x98e1, 0x98e1, 0x98e1, 0 }, + { 0x9a93, 0x9a93, 0x9a93, 0 }, + { 0x9c48, 0x9c48, 0x9c48, 0 }, + { 0x9e00, 0x9e00, 0x9e00, 0 }, + { 0x9fbb, 0x9fbb, 0x9fbb, 0 }, + { 0xa178, 0xa178, 0xa178, 0 }, + { 0xa338, 0xa338, 0xa338, 0 }, + { 0xa4fb, 0xa4fb, 0xa4fb, 0 }, + { 0xa6c1, 0xa6c1, 0xa6c1, 0 }, + { 0xa88a, 0xa88a, 0xa88a, 0 }, + { 0xaa56, 0xaa56, 0xaa56, 0 }, + { 0xac24, 0xac24, 0xac24, 0 }, + { 0xadf5, 0xadf5, 0xadf5, 0 }, + { 0xafc9, 0xafc9, 0xafc9, 0 }, + { 0xb1a0, 0xb1a0, 0xb1a0, 0 }, + { 0xb37a, 0xb37a, 0xb37a, 0 }, + { 0xb557, 0xb557, 0xb557, 0 }, + { 0xb736, 0xb736, 0xb736, 0 }, + { 0xb919, 0xb919, 0xb919, 0 }, + { 0xbafe, 0xbafe, 0xbafe, 0 }, + { 0xbce6, 0xbce6, 0xbce6, 0 }, + { 0xbed2, 0xbed2, 0xbed2, 0 }, + { 0xc0c0, 0xc0c0, 0xc0c0, 0 }, + { 0xc2b0, 0xc2b0, 0xc2b0, 0 }, + { 0xc4a4, 0xc4a4, 0xc4a4, 0 }, + { 0xc69b, 0xc69b, 0xc69b, 0 }, + { 0xc895, 0xc895, 0xc895, 0 }, + { 0xca91, 0xca91, 0xca91, 0 }, + { 0xcc91, 0xcc91, 0xcc91, 0 }, + { 0xce93, 0xce93, 0xce93, 0 }, + { 0xd098, 0xd098, 0xd098, 0 }, + { 0xd2a1, 0xd2a1, 0xd2a1, 0 }, + { 0xd4ac, 0xd4ac, 0xd4ac, 0 }, + { 0xd6ba, 0xd6ba, 0xd6ba, 0 }, + { 0xd8cb, 0xd8cb, 0xd8cb, 0 }, + 
{ 0xdadf, 0xdadf, 0xdadf, 0 }, + { 0xdcf7, 0xdcf7, 0xdcf7, 0 }, + { 0xdf11, 0xdf11, 0xdf11, 0 }, + { 0xe12e, 0xe12e, 0xe12e, 0 }, + { 0xe34e, 0xe34e, 0xe34e, 0 }, + { 0xe571, 0xe571, 0xe571, 0 }, + { 0xe796, 0xe796, 0xe796, 0 }, + { 0xe9bf, 0xe9bf, 0xe9bf, 0 }, + { 0xebeb, 0xebeb, 0xebeb, 0 }, + { 0xee1a, 0xee1a, 0xee1a, 0 }, + { 0xf04c, 0xf04c, 0xf04c, 0 }, + { 0xf281, 0xf281, 0xf281, 0 }, + { 0xf4b9, 0xf4b9, 0xf4b9, 0 }, + { 0xf6f4, 0xf6f4, 0xf6f4, 0 }, + { 0xf932, 0xf932, 0xf932, 0 }, + { 0xfb73, 0xfb73, 0xfb73, 0 }, + { 0xfdb7, 0xfdb7, 0xfdb7, 0 }, + { 0xffff, 0xffff, 0xffff, 0 }, +}; + +const struct vkms_color_lut srgb_eotf = { + .base = srgb_array, + .lut_length = LUT_SIZE, + .channel_value2index_ratio = 0xff00ffll +}; +EXPORT_SYMBOL(srgb_eotf); + +static struct drm_color_lut srgb_inv_array[LUT_SIZE] = { + { 0x0, 0x0, 0x0, 0 }, + { 0xcc2, 0xcc2, 0xcc2, 0 }, + { 0x15be, 0x15be, 0x15be, 0 }, + { 0x1c56, 0x1c56, 0x1c56, 0 }, + { 0x21bd, 0x21bd, 0x21bd, 0 }, + { 0x2666, 0x2666, 0x2666, 0 }, + { 0x2a8a, 0x2a8a, 0x2a8a, 0 }, + { 0x2e4c, 0x2e4c, 0x2e4c, 0 }, + { 0x31c0, 0x31c0, 0x31c0, 0 }, + { 0x34f6, 0x34f6, 0x34f6, 0 }, + { 0x37f9, 0x37f9, 0x37f9, 0 }, + { 0x3acf, 0x3acf, 0x3acf, 0 }, + { 0x3d80, 0x3d80, 0x3d80, 0 }, + { 0x4010, 0x4010, 0x4010, 0 }, + { 0x4284, 0x4284, 0x4284, 0 }, + { 0x44dd, 0x44dd, 0x44dd, 0 }, + { 0x4720, 0x4720, 0x4720, 0 }, + { 0x494e, 0x494e, 0x494e, 0 }, + { 0x4b69, 0x4b69, 0x4b69, 0 }, + { 0x4d73, 0x4d73, 0x4d73, 0 }, + { 0x4f6e, 0x4f6e, 0x4f6e, 0 }, + { 0x5159, 0x5159, 0x5159, 0 }, + { 0x5337, 0x5337, 0x5337, 0 }, + { 0x5509, 0x5509, 0x5509, 0 }, + { 0x56cf, 0x56cf, 0x56cf, 0 }, + { 0x588a, 0x588a, 0x588a, 0 }, + { 0x5a3b, 0x5a3b, 0x5a3b, 0 }, + { 0x5be2, 0x5be2, 0x5be2, 0 }, + { 0x5d80, 0x5d80, 0x5d80, 0 }, + { 0x5f16, 0x5f16, 0x5f16, 0 }, + { 0x60a4, 0x60a4, 0x60a4, 0 }, + { 0x6229, 0x6229, 0x6229, 0 }, + { 0x63a8, 0x63a8, 0x63a8, 0 }, + { 0x6520, 0x6520, 0x6520, 0 }, + { 0x6691, 0x6691, 0x6691, 0 }, + { 0x67fc, 0x67fc, 0x67fc, 0 }, + { 0x6961, 0x6961, 0x6961, 0 }, + { 0x6ac0, 0x6ac0, 0x6ac0, 0 }, + { 0x6c19, 0x6c19, 0x6c19, 0 }, + { 0x6d6e, 0x6d6e, 0x6d6e, 0 }, + { 0x6ebd, 0x6ebd, 0x6ebd, 0 }, + { 0x7008, 0x7008, 0x7008, 0 }, + { 0x714d, 0x714d, 0x714d, 0 }, + { 0x728f, 0x728f, 0x728f, 0 }, + { 0x73cc, 0x73cc, 0x73cc, 0 }, + { 0x7504, 0x7504, 0x7504, 0 }, + { 0x7639, 0x7639, 0x7639, 0 }, + { 0x776a, 0x776a, 0x776a, 0 }, + { 0x7897, 0x7897, 0x7897, 0 }, + { 0x79c1, 0x79c1, 0x79c1, 0 }, + { 0x7ae7, 0x7ae7, 0x7ae7, 0 }, + { 0x7c09, 0x7c09, 0x7c09, 0 }, + { 0x7d28, 0x7d28, 0x7d28, 0 }, + { 0x7e44, 0x7e44, 0x7e44, 0 }, + { 0x7f5d, 0x7f5d, 0x7f5d, 0 }, + { 0x8073, 0x8073, 0x8073, 0 }, + { 0x8186, 0x8186, 0x8186, 0 }, + { 0x8296, 0x8296, 0x8296, 0 }, + { 0x83a4, 0x83a4, 0x83a4, 0 }, + { 0x84ae, 0x84ae, 0x84ae, 0 }, + { 0x85b6, 0x85b6, 0x85b6, 0 }, + { 0x86bc, 0x86bc, 0x86bc, 0 }, + { 0x87bf, 0x87bf, 0x87bf, 0 }, + { 0x88bf, 0x88bf, 0x88bf, 0 }, + { 0x89be, 0x89be, 0x89be, 0 }, + { 0x8ab9, 0x8ab9, 0x8ab9, 0 }, + { 0x8bb3, 0x8bb3, 0x8bb3, 0 }, + { 0x8cab, 0x8cab, 0x8cab, 0 }, + { 0x8da0, 0x8da0, 0x8da0, 0 }, + { 0x8e93, 0x8e93, 0x8e93, 0 }, + { 0x8f84, 0x8f84, 0x8f84, 0 }, + { 0x9073, 0x9073, 0x9073, 0 }, + { 0x9161, 0x9161, 0x9161, 0 }, + { 0x924c, 0x924c, 0x924c, 0 }, + { 0x9335, 0x9335, 0x9335, 0 }, + { 0x941d, 0x941d, 0x941d, 0 }, + { 0x9503, 0x9503, 0x9503, 0 }, + { 0x95e7, 0x95e7, 0x95e7, 0 }, + { 0x96c9, 0x96c9, 0x96c9, 0 }, + { 0x97aa, 0x97aa, 0x97aa, 0 }, + { 0x9889, 0x9889, 0x9889, 0 }, + { 0x9966, 0x9966, 0x9966, 0 }, + { 0x9a42, 0x9a42, 0x9a42, 0 }, + { 
0x9b1c, 0x9b1c, 0x9b1c, 0 }, + { 0x9bf5, 0x9bf5, 0x9bf5, 0 }, + { 0x9ccc, 0x9ccc, 0x9ccc, 0 }, + { 0x9da1, 0x9da1, 0x9da1, 0 }, + { 0x9e76, 0x9e76, 0x9e76, 0 }, + { 0x9f49, 0x9f49, 0x9f49, 0 }, + { 0xa01a, 0xa01a, 0xa01a, 0 }, + { 0xa0ea, 0xa0ea, 0xa0ea, 0 }, + { 0xa1b9, 0xa1b9, 0xa1b9, 0 }, + { 0xa286, 0xa286, 0xa286, 0 }, + { 0xa352, 0xa352, 0xa352, 0 }, + { 0xa41d, 0xa41d, 0xa41d, 0 }, + { 0xa4e7, 0xa4e7, 0xa4e7, 0 }, + { 0xa5af, 0xa5af, 0xa5af, 0 }, + { 0xa676, 0xa676, 0xa676, 0 }, + { 0xa73c, 0xa73c, 0xa73c, 0 }, + { 0xa801, 0xa801, 0xa801, 0 }, + { 0xa8c5, 0xa8c5, 0xa8c5, 0 }, + { 0xa987, 0xa987, 0xa987, 0 }, + { 0xaa48, 0xaa48, 0xaa48, 0 }, + { 0xab09, 0xab09, 0xab09, 0 }, + { 0xabc8, 0xabc8, 0xabc8, 0 }, + { 0xac86, 0xac86, 0xac86, 0 }, + { 0xad43, 0xad43, 0xad43, 0 }, + { 0xadff, 0xadff, 0xadff, 0 }, + { 0xaeba, 0xaeba, 0xaeba, 0 }, + { 0xaf74, 0xaf74, 0xaf74, 0 }, + { 0xb02d, 0xb02d, 0xb02d, 0 }, + { 0xb0e5, 0xb0e5, 0xb0e5, 0 }, + { 0xb19c, 0xb19c, 0xb19c, 0 }, + { 0xb252, 0xb252, 0xb252, 0 }, + { 0xb307, 0xb307, 0xb307, 0 }, + { 0xb3bb, 0xb3bb, 0xb3bb, 0 }, + { 0xb46f, 0xb46f, 0xb46f, 0 }, + { 0xb521, 0xb521, 0xb521, 0 }, + { 0xb5d3, 0xb5d3, 0xb5d3, 0 }, + { 0xb683, 0xb683, 0xb683, 0 }, + { 0xb733, 0xb733, 0xb733, 0 }, + { 0xb7e2, 0xb7e2, 0xb7e2, 0 }, + { 0xb890, 0xb890, 0xb890, 0 }, + { 0xb93d, 0xb93d, 0xb93d, 0 }, + { 0xb9ea, 0xb9ea, 0xb9ea, 0 }, + { 0xba96, 0xba96, 0xba96, 0 }, + { 0xbb40, 0xbb40, 0xbb40, 0 }, + { 0xbbea, 0xbbea, 0xbbea, 0 }, + { 0xbc94, 0xbc94, 0xbc94, 0 }, + { 0xbd3c, 0xbd3c, 0xbd3c, 0 }, + { 0xbde4, 0xbde4, 0xbde4, 0 }, + { 0xbe8b, 0xbe8b, 0xbe8b, 0 }, + { 0xbf31, 0xbf31, 0xbf31, 0 }, + { 0xbfd7, 0xbfd7, 0xbfd7, 0 }, + { 0xc07b, 0xc07b, 0xc07b, 0 }, + { 0xc120, 0xc120, 0xc120, 0 }, + { 0xc1c3, 0xc1c3, 0xc1c3, 0 }, + { 0xc266, 0xc266, 0xc266, 0 }, + { 0xc308, 0xc308, 0xc308, 0 }, + { 0xc3a9, 0xc3a9, 0xc3a9, 0 }, + { 0xc449, 0xc449, 0xc449, 0 }, + { 0xc4e9, 0xc4e9, 0xc4e9, 0 }, + { 0xc589, 0xc589, 0xc589, 0 }, + { 0xc627, 0xc627, 0xc627, 0 }, + { 0xc6c5, 0xc6c5, 0xc6c5, 0 }, + { 0xc763, 0xc763, 0xc763, 0 }, + { 0xc7ff, 0xc7ff, 0xc7ff, 0 }, + { 0xc89b, 0xc89b, 0xc89b, 0 }, + { 0xc937, 0xc937, 0xc937, 0 }, + { 0xc9d2, 0xc9d2, 0xc9d2, 0 }, + { 0xca6c, 0xca6c, 0xca6c, 0 }, + { 0xcb06, 0xcb06, 0xcb06, 0 }, + { 0xcb9f, 0xcb9f, 0xcb9f, 0 }, + { 0xcc37, 0xcc37, 0xcc37, 0 }, + { 0xcccf, 0xcccf, 0xcccf, 0 }, + { 0xcd66, 0xcd66, 0xcd66, 0 }, + { 0xcdfd, 0xcdfd, 0xcdfd, 0 }, + { 0xce93, 0xce93, 0xce93, 0 }, + { 0xcf29, 0xcf29, 0xcf29, 0 }, + { 0xcfbe, 0xcfbe, 0xcfbe, 0 }, + { 0xd053, 0xd053, 0xd053, 0 }, + { 0xd0e7, 0xd0e7, 0xd0e7, 0 }, + { 0xd17a, 0xd17a, 0xd17a, 0 }, + { 0xd20d, 0xd20d, 0xd20d, 0 }, + { 0xd2a0, 0xd2a0, 0xd2a0, 0 }, + { 0xd331, 0xd331, 0xd331, 0 }, + { 0xd3c3, 0xd3c3, 0xd3c3, 0 }, + { 0xd454, 0xd454, 0xd454, 0 }, + { 0xd4e4, 0xd4e4, 0xd4e4, 0 }, + { 0xd574, 0xd574, 0xd574, 0 }, + { 0xd603, 0xd603, 0xd603, 0 }, + { 0xd692, 0xd692, 0xd692, 0 }, + { 0xd720, 0xd720, 0xd720, 0 }, + { 0xd7ae, 0xd7ae, 0xd7ae, 0 }, + { 0xd83c, 0xd83c, 0xd83c, 0 }, + { 0xd8c9, 0xd8c9, 0xd8c9, 0 }, + { 0xd955, 0xd955, 0xd955, 0 }, + { 0xd9e1, 0xd9e1, 0xd9e1, 0 }, + { 0xda6d, 0xda6d, 0xda6d, 0 }, + { 0xdaf8, 0xdaf8, 0xdaf8, 0 }, + { 0xdb83, 0xdb83, 0xdb83, 0 }, + { 0xdc0d, 0xdc0d, 0xdc0d, 0 }, + { 0xdc97, 0xdc97, 0xdc97, 0 }, + { 0xdd20, 0xdd20, 0xdd20, 0 }, + { 0xdda9, 0xdda9, 0xdda9, 0 }, + { 0xde31, 0xde31, 0xde31, 0 }, + { 0xdeb9, 0xdeb9, 0xdeb9, 0 }, + { 0xdf41, 0xdf41, 0xdf41, 0 }, + { 0xdfc8, 0xdfc8, 0xdfc8, 0 }, + { 0xe04f, 0xe04f, 0xe04f, 0 }, + { 0xe0d5, 0xe0d5, 0xe0d5, 
0 }, + { 0xe15b, 0xe15b, 0xe15b, 0 }, + { 0xe1e0, 0xe1e0, 0xe1e0, 0 }, + { 0xe266, 0xe266, 0xe266, 0 }, + { 0xe2ea, 0xe2ea, 0xe2ea, 0 }, + { 0xe36f, 0xe36f, 0xe36f, 0 }, + { 0xe3f3, 0xe3f3, 0xe3f3, 0 }, + { 0xe476, 0xe476, 0xe476, 0 }, + { 0xe4f9, 0xe4f9, 0xe4f9, 0 }, + { 0xe57c, 0xe57c, 0xe57c, 0 }, + { 0xe5fe, 0xe5fe, 0xe5fe, 0 }, + { 0xe680, 0xe680, 0xe680, 0 }, + { 0xe702, 0xe702, 0xe702, 0 }, + { 0xe783, 0xe783, 0xe783, 0 }, + { 0xe804, 0xe804, 0xe804, 0 }, + { 0xe884, 0xe884, 0xe884, 0 }, + { 0xe905, 0xe905, 0xe905, 0 }, + { 0xe984, 0xe984, 0xe984, 0 }, + { 0xea04, 0xea04, 0xea04, 0 }, + { 0xea83, 0xea83, 0xea83, 0 }, + { 0xeb02, 0xeb02, 0xeb02, 0 }, + { 0xeb80, 0xeb80, 0xeb80, 0 }, + { 0xebfe, 0xebfe, 0xebfe, 0 }, + { 0xec7b, 0xec7b, 0xec7b, 0 }, + { 0xecf9, 0xecf9, 0xecf9, 0 }, + { 0xed76, 0xed76, 0xed76, 0 }, + { 0xedf2, 0xedf2, 0xedf2, 0 }, + { 0xee6f, 0xee6f, 0xee6f, 0 }, + { 0xeeeb, 0xeeeb, 0xeeeb, 0 }, + { 0xef66, 0xef66, 0xef66, 0 }, + { 0xefe2, 0xefe2, 0xefe2, 0 }, + { 0xf05d, 0xf05d, 0xf05d, 0 }, + { 0xf0d7, 0xf0d7, 0xf0d7, 0 }, + { 0xf152, 0xf152, 0xf152, 0 }, + { 0xf1cc, 0xf1cc, 0xf1cc, 0 }, + { 0xf245, 0xf245, 0xf245, 0 }, + { 0xf2bf, 0xf2bf, 0xf2bf, 0 }, + { 0xf338, 0xf338, 0xf338, 0 }, + { 0xf3b0, 0xf3b0, 0xf3b0, 0 }, + { 0xf429, 0xf429, 0xf429, 0 }, + { 0xf4a1, 0xf4a1, 0xf4a1, 0 }, + { 0xf519, 0xf519, 0xf519, 0 }, + { 0xf590, 0xf590, 0xf590, 0 }, + { 0xf608, 0xf608, 0xf608, 0 }, + { 0xf67e, 0xf67e, 0xf67e, 0 }, + { 0xf6f5, 0xf6f5, 0xf6f5, 0 }, + { 0xf76b, 0xf76b, 0xf76b, 0 }, + { 0xf7e1, 0xf7e1, 0xf7e1, 0 }, + { 0xf857, 0xf857, 0xf857, 0 }, + { 0xf8cd, 0xf8cd, 0xf8cd, 0 }, + { 0xf942, 0xf942, 0xf942, 0 }, + { 0xf9b7, 0xf9b7, 0xf9b7, 0 }, + { 0xfa2b, 0xfa2b, 0xfa2b, 0 }, + { 0xfaa0, 0xfaa0, 0xfaa0, 0 }, + { 0xfb14, 0xfb14, 0xfb14, 0 }, + { 0xfb88, 0xfb88, 0xfb88, 0 }, + { 0xfbfb, 0xfbfb, 0xfbfb, 0 }, + { 0xfc6e, 0xfc6e, 0xfc6e, 0 }, + { 0xfce1, 0xfce1, 0xfce1, 0 }, + { 0xfd54, 0xfd54, 0xfd54, 0 }, + { 0xfdc6, 0xfdc6, 0xfdc6, 0 }, + { 0xfe39, 0xfe39, 0xfe39, 0 }, + { 0xfeaa, 0xfeaa, 0xfeaa, 0 }, + { 0xff1c, 0xff1c, 0xff1c, 0 }, + { 0xff8d, 0xff8d, 0xff8d, 0 }, + { 0xffff, 0xffff, 0xffff, 0 }, +}; + +const struct vkms_color_lut srgb_inv_eotf = { + .base = srgb_inv_array, + .lut_length = LUT_SIZE, + .channel_value2index_ratio = 0xff00ffll +}; +EXPORT_SYMBOL(srgb_inv_eotf); diff --git a/drivers/gpu/drm/vkms/vkms_luts.h b/drivers/gpu/drm/vkms/vkms_luts.h new file mode 100644 index 000000000000..925a4a7b84e2 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_luts.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _VKMS_LUTS_H_ +#define _VKMS_LUTS_H_ + +#define LUT_SIZE 256 + +extern const struct vkms_color_lut linear_eotf; +extern const struct vkms_color_lut srgb_eotf; +extern const struct vkms_color_lut srgb_inv_eotf; + +#endif /* _VKMS_LUTS_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 2ee3749e2b28..86ce07a617f5 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -4,6 +4,7 @@ #include "vkms_connector.h" #include "vkms_drv.h" #include <drm/drm_managed.h> +#include <drm/drm_print.h> int vkms_output_init(struct vkms_device *vkmsdev) { @@ -19,11 +20,7 @@ int vkms_output_init(struct vkms_device *vkmsdev) return -EINVAL; vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { - enum drm_plane_type type; - - type = vkms_config_plane_get_type(plane_cfg); - - plane_cfg->plane = vkms_plane_init(vkmsdev, type); + plane_cfg->plane = vkms_plane_init(vkmsdev, plane_cfg); if 
(IS_ERR(plane_cfg->plane)) { DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n"); return PTR_ERR(plane_cfg->plane); diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index e592e47a5736..19fe6acad306 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ +#include "vkms_config.h" #include <linux/iosys-map.h> #include <drm/drm_atomic.h> @@ -8,6 +9,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include "vkms_drv.h" #include "vkms_formats.h" @@ -217,7 +219,7 @@ static const struct drm_plane_helper_funcs vkms_plane_helper_funcs = { }; struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, - enum drm_plane_type type) + struct vkms_config_plane *plane_cfg) { struct drm_device *dev = &vkmsdev->drm; struct vkms_plane *plane; @@ -225,7 +227,8 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 0, &vkms_plane_funcs, vkms_formats, ARRAY_SIZE(vkms_formats), - NULL, type, NULL); + NULL, vkms_config_plane_get_type(plane_cfg), + NULL); if (IS_ERR(plane)) return plane; @@ -243,5 +246,8 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE); + if (vkms_config_plane_get_default_pipeline(plane_cfg)) + vkms_initialize_colorops(&plane->base); + return plane; } diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index 45d69a3b85f6..097ae1f0a230 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -6,6 +6,7 @@ #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_writeback.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c index 718832b08d96..c46f17ba7236 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c @@ -100,8 +100,10 @@ vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps) if (vmw->has_mob) { if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0) return VMW_CURSOR_UPDATE_MOB; + else + return VMW_CURSOR_UPDATE_GB_ONLY; } - + drm_warn_once(&vmw->drm, "Unknown Cursor Type!\n"); return VMW_CURSOR_UPDATE_NONE; } @@ -139,6 +141,7 @@ static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type, { switch (update_type) { case VMW_CURSOR_UPDATE_LEGACY: + case VMW_CURSOR_UPDATE_GB_ONLY: case VMW_CURSOR_UPDATE_NONE: return 0; case VMW_CURSOR_UPDATE_MOB: @@ -623,6 +626,7 @@ int vmw_cursor_plane_prepare_fb(struct drm_plane *plane, if (!surface || vps->cursor.legacy.id == surface->snooper.id) vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE; break; + case VMW_CURSOR_UPDATE_GB_ONLY: case VMW_CURSOR_UPDATE_MOB: { bo = vmw_user_object_buffer(&vps->uo); if (bo) { @@ -737,6 +741,7 @@ void vmw_cursor_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { + struct vmw_bo *bo; struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_plane_state *old_state = @@ -762,6 +767,15 @@ vmw_cursor_plane_atomic_update(struct drm_plane *plane, case VMW_CURSOR_UPDATE_MOB: vmw_cursor_update_mob(dev_priv, vps); break; + case 
VMW_CURSOR_UPDATE_GB_ONLY: + bo = vmw_user_object_buffer(&vps->uo); + if (bo) + vmw_send_define_cursor_cmd(dev_priv, bo->map.virtual, + vps->base.crtc_w, + vps->base.crtc_h, + vps->base.hotspot_x, + vps->base.hotspot_y); + break; case VMW_CURSOR_UPDATE_NONE: /* do nothing */ break; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h index 40694925a70e..0c2cc0699b0d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h @@ -33,6 +33,7 @@ static const u32 __maybe_unused vmw_cursor_plane_formats[] = { enum vmw_cursor_update_type { VMW_CURSOR_UPDATE_NONE = 0, VMW_CURSOR_UPDATE_LEGACY, + VMW_CURSOR_UPDATE_GB_ONLY, VMW_CURSOR_UPDATE_MOB, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 8ff958d119be..599052d07ae8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1023,8 +1023,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->drm.dev, dev_priv->drm.anon_inode->i_mapping, dev_priv->drm.vma_offset_manager, - dev_priv->map_mode == vmw_dma_alloc_coherent, - false); + (dev_priv->map_mode == vmw_dma_alloc_coherent) ? + TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0); if (unlikely(ret != 0)) { drm_err(&dev_priv->drm, "Failed initializing TTM buffer object driver.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index eda5b6f8f4c4..f2abaf1bda6a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -16,6 +16,7 @@ #include <drm/drm_auth.h> #include <drm/drm_device.h> #include <drm/drm_file.h> +#include <drm/drm_print.h> #include <drm/drm_rect.h> #include <drm/ttm/ttm_execbuf_util.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index d539f25b5fbe..3057f8baa7d2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3668,6 +3668,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, cmd_id = header->id; + if (header->size > SVGA_CMD_MAX_DATASIZE) { + VMW_DEBUG_USER("SVGA3D command: %d is too big.\n", + cmd_id + SVGA_3D_CMD_BASE); + return -E2BIG; + } *size = header->size + sizeof(SVGA3dCmdHeader); cmd_id -= SVGA_3D_CMD_BASE; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index eedf1fe60be7..39f8c46550c2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -37,7 +37,7 @@ static void vmw_gem_object_free(struct drm_gem_object *gobj) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); if (bo) - ttm_bo_put(bo); + ttm_bo_fini(bo); } static int vmw_gem_object_open(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 54ea1b513950..d32ce1cb579e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -553,6 +553,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, memcpy(&vfbs->uo, uo, sizeof(vfbs->uo)); vmw_user_object_ref(&vfbs->uo); + if (vfbs->uo.buffer) + vfbs->base.base.obj[0] = &vfbs->uo.buffer->tbo.base; + *out = &vfbs->base; ret = drm_framebuffer_init(dev, &vfbs->base.base, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 7de20e56082c..fd4e76486f2d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ 
b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -32,22 +32,22 @@ enum vmw_bo_dirty_method { /** * struct vmw_bo_dirty - Dirty information for buffer objects + * @ref_count: Reference count for this structure. Must be first member! * @start: First currently dirty bit * @end: Last currently dirty bit + 1 * @method: The currently used dirty method * @change_count: Number of consecutive method change triggers - * @ref_count: Reference count for this structure * @bitmap_size: The size of the bitmap in bits. Typically equal to the * nuber of pages in the bo. * @bitmap: A bitmap where each bit represents a page. A set bit means a * dirty page. */ struct vmw_bo_dirty { + struct kref ref_count; unsigned long start; unsigned long end; enum vmw_bo_dirty_method method; unsigned int change_count; - unsigned int ref_count; unsigned long bitmap_size; unsigned long bitmap[]; }; @@ -221,7 +221,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo) int ret; if (dirty) { - dirty->ref_count++; + kref_get(&dirty->ref_count); return 0; } @@ -235,7 +235,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo) dirty->bitmap_size = num_pages; dirty->start = dirty->bitmap_size; dirty->end = 0; - dirty->ref_count = 1; + kref_init(&dirty->ref_count); if (num_pages < PAGE_SIZE / sizeof(pte_t)) { dirty->method = VMW_BO_DIRTY_PAGETABLE; } else { @@ -274,10 +274,8 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; - if (dirty && --dirty->ref_count == 0) { - kvfree(dirty); + if (dirty && kref_put(&dirty->ref_count, (void *)kvfree)) vbo->dirty = NULL; - } } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 7e281c3c6bc5..c4ac9b47e23a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -15,6 +15,7 @@ #include "vmw_surface_cache.h" #include "device_include/svga3d_surfacedefs.h" +#include <drm/drm_dumb_buffers.h> #include <drm/ttm/ttm_placement.h> #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32) @@ -2267,23 +2268,9 @@ int vmw_dumb_create(struct drm_file *file_priv, * contents is going to be rendered guest side. */ if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) { - int cpp = DIV_ROUND_UP(args->bpp, 8); - - switch (cpp) { - case 1: /* DRM_FORMAT_C8 */ - case 2: /* DRM_FORMAT_RGB565 */ - case 4: /* DRM_FORMAT_XRGB8888 */ - break; - default: - /* - * Dumb buffers don't allow anything else. 
- * This is tested via IGT's dumb_buffers - */ - return -EINVAL; - } - - args->pitch = args->width * cpp; - args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); + ret = drm_mode_size_dumb(dev, args, 0, 0); + if (ret) + return ret; ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, args->size, &args->handle, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c index aec774fa4d7b..5abd7f5ad2db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c @@ -247,9 +247,8 @@ vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vmw_private *vmw = vmw_priv(dev); - unsigned int pipe = crtc->index; struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); if (!vmw->vkms_enabled) return false; @@ -281,8 +280,7 @@ vmw_vkms_enable_vblank(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct vmw_private *vmw = vmw_priv(dev); - unsigned int pipe = drm_crtc_index(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); struct vmw_display_unit *du = vmw_crtc_to_du(crtc); if (!vmw->vkms_enabled) diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 7219f6b884b6..4b288eb3f5b0 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -13,7 +13,6 @@ config DRM_XE select TMPFS select DRM_BUDDY select DRM_CLIENT_SELECTION - select DRM_EXEC select DRM_KMS_HELPER select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n select DRM_PANEL diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug index 87902b4bd6d3..01227c77f6d7 100644 --- a/drivers/gpu/drm/xe/Kconfig.debug +++ b/drivers/gpu/drm/xe/Kconfig.debug @@ -40,23 +40,23 @@ config DRM_XE_DEBUG_VM If in doubt, say "N". -config DRM_XE_DEBUG_MEMIRQ - bool "Enable extra memirq debugging" +config DRM_XE_DEBUG_SRIOV + bool "Enable extra SR-IOV debugging" default n + imply DRM_XE_DEBUG_MEMIRQ help - Choose this option to enable additional debugging info for - memory based interrupts. + Enable extra SR-IOV debugging info. Recommended for driver developers only. If in doubt, say "N". -config DRM_XE_DEBUG_SRIOV - bool "Enable extra SR-IOV debugging" +config DRM_XE_DEBUG_MEMIRQ + bool "Enable extra memirq debugging" default n - select DRM_XE_DEBUG_MEMIRQ help - Enable extra SR-IOV debugging info. + Choose this option to enable additional debugging info for + memory based interrupts. Recommended for driver developers only. 
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index d9c6cf0f189e..e4b273b025d2 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -58,7 +58,6 @@ xe-y += xe_bb.o \ xe_gt_freq.o \ xe_gt_idle.o \ xe_gt_mcr.o \ - xe_gt_pagefault.o \ xe_gt_sysfs.o \ xe_gt_throttle.o \ xe_gt_topology.o \ @@ -73,6 +72,7 @@ xe-y += xe_bb.o \ xe_guc_id_mgr.o \ xe_guc_klv_helpers.o \ xe_guc_log.o \ + xe_guc_pagefault.o \ xe_guc_pc.o \ xe_guc_submit.o \ xe_guc_tlb_inval.o \ @@ -94,6 +94,7 @@ xe-y += xe_bb.o \ xe_nvm.o \ xe_oa.o \ xe_observation.o \ + xe_pagefault.o \ xe_pat.o \ xe_pci.o \ xe_pcode.o \ @@ -173,8 +174,15 @@ xe-$(CONFIG_PCI_IOV) += \ xe_lmtt_2l.o \ xe_lmtt_ml.o \ xe_pci_sriov.o \ + xe_sriov_packet.o \ xe_sriov_pf.o \ - xe_sriov_pf_service.o + xe_sriov_pf_control.o \ + xe_sriov_pf_debugfs.o \ + xe_sriov_pf_migration.o \ + xe_sriov_pf_provision.o \ + xe_sriov_pf_service.o \ + xe_sriov_pf_sysfs.o \ + xe_tile_sriov_pf_debugfs.o # include helpers for tests even when XE is built-in ifdef CONFIG_DRM_XE_KUNIT_TEST @@ -201,7 +209,6 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE # Display code specific to xe xe-$(CONFIG_DRM_XE_DISPLAY) += \ display/ext/i915_irq.o \ - display/ext/i915_utils.o \ display/intel_bo.o \ display/intel_fb_bo.o \ display/intel_fbdev_fb.o \ @@ -214,6 +221,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ display/xe_hdcp_gsc.o \ display/xe_panic.o \ display/xe_plane_initial.o \ + display/xe_stolen.o \ display/xe_tdf.o # SOC code shared with i915 @@ -230,6 +238,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_backlight.o \ i915-display/intel_bios.o \ i915-display/intel_bw.o \ + i915-display/intel_casf.o \ i915-display/intel_cdclk.o \ i915-display/intel_cmtg.o \ i915-display/intel_color.o \ @@ -239,6 +248,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_crtc_state_dump.o \ i915-display/intel_cursor.o \ i915-display/intel_cx0_phy.o \ + i915-display/intel_dbuf_bw.o \ i915-display/intel_ddi.o \ i915-display/intel_ddi_buf_trans.o \ i915-display/intel_display.o \ @@ -250,7 +260,9 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_display_power.o \ i915-display/intel_display_power_map.o \ i915-display/intel_display_power_well.o \ + i915-display/intel_display_rpm.o \ i915-display/intel_display_trace.o \ + i915-display/intel_display_utils.o \ i915-display/intel_display_wa.o \ i915-display/intel_dkl_phy.o \ i915-display/intel_dmc.o \ @@ -287,6 +299,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_hti.o \ i915-display/intel_link_bw.o \ i915-display/intel_lspcon.o \ + i915-display/intel_lt_phy.o \ i915-display/intel_modeset_lock.o \ i915-display/intel_modeset_setup.o \ i915-display/intel_modeset_verify.o \ @@ -307,6 +320,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_vga.o \ i915-display/intel_vrr.o \ i915-display/intel_wm.o \ + i915-display/skl_prefill.o \ i915-display/skl_scaler.o \ i915-display/skl_universal_plane.o \ i915-display/skl_watermark.o diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h index 31090c69dfbe..47756e4674a1 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h @@ -196,14 +196,6 @@ enum xe_guc_register_context_multi_lrc_param_offsets { XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11, }; -enum xe_guc_context_wq_item_offsets { - XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN = 0, - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW, - 
XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS, - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID, - XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL, -}; - enum xe_guc_report_status { XE_GUC_REPORT_STATUS_UNKNOWN = 0x0, XE_GUC_REPORT_STATUS_ACKED = 0x1, diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h index 8a048980ea38..0548b2e0316f 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h @@ -5,10 +5,8 @@ #define __I915_GEM_OBJECT_H__ struct dma_fence; -struct i915_sched_attr; -static inline void i915_gem_fence_wait_priority(struct dma_fence *fence, - const struct i915_sched_attr *attr) +static inline void i915_gem_fence_wait_priority_display(struct dma_fence *fence) { } diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h index f097fc6d5127..48e3256ba37e 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h @@ -6,80 +6,35 @@ #ifndef _I915_GEM_STOLEN_H_ #define _I915_GEM_STOLEN_H_ -#include "xe_ttm_stolen_mgr.h" -#include "xe_res_cursor.h" -#include "xe_validation.h" - -struct xe_bo; - -struct i915_stolen_fb { - struct xe_bo *bo; -}; - -static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, - struct i915_stolen_fb *fb, - u32 size, u32 align, - u32 start, u32 end) -{ - struct xe_bo *bo; - int err = 0; - u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN; - - if (start < SZ_4K) - start = SZ_4K; - - if (align) { - size = ALIGN(size, align); - start = ALIGN(start, align); - } - - bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe), - size, start, end, ttm_bo_type_kernel, flags); - if (IS_ERR(bo)) { - err = PTR_ERR(bo); - bo = NULL; - return err; - } - - fb->bo = bo; - - return err; -} - -static inline int i915_gem_stolen_insert_node(struct xe_device *xe, - struct i915_stolen_fb *fb, - u32 size, u32 align) -{ - /* Not used on xe */ - BUG_ON(1); - return -ENODEV; -} - -static inline void i915_gem_stolen_remove_node(struct xe_device *xe, - struct i915_stolen_fb *fb) -{ - xe_bo_unpin_map_no_vm(fb->bo); - fb->bo = NULL; -} - -#define i915_gem_stolen_initialized(xe) (!!ttm_manager_type(&(xe)->ttm, XE_PL_STOLEN)) -#define i915_gem_stolen_node_allocated(fb) (!!((fb)->bo)) - -static inline u32 i915_gem_stolen_node_offset(struct i915_stolen_fb *fb) -{ - struct xe_res_cursor res; - - xe_res_first(fb->bo->ttm.resource, 0, 4096, &res); - return res.start; -} - -/* Used for < gen4. These are not supported by Xe */ -#define i915_gem_stolen_area_address(xe) (!WARN_ON(1)) -/* Used for gen9 specific WA. 
Gen9 is not supported by Xe */ -#define i915_gem_stolen_area_size(xe) (!WARN_ON(1)) - -#define i915_gem_stolen_node_address(xe, fb) (xe_ttm_stolen_gpu_offset(xe) + \ - i915_gem_stolen_node_offset(fb)) -#define i915_gem_stolen_node_size(fb) ((u64)((fb)->bo->ttm.base.size)) +#include <linux/types.h> + +struct drm_device; +struct intel_stolen_node; + +int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size, + unsigned int align, u64 start, u64 end); + +int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size, + unsigned int align); + +void i915_gem_stolen_remove_node(struct intel_stolen_node *node); + +bool i915_gem_stolen_initialized(struct drm_device *drm); + +bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node); + +u32 i915_gem_stolen_node_offset(struct intel_stolen_node *node); + +u64 i915_gem_stolen_area_address(struct drm_device *drm); + +u64 i915_gem_stolen_area_size(struct drm_device *drm); + +u64 i915_gem_stolen_node_address(struct intel_stolen_node *node); + +u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node); + +struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm); + +void i915_gem_stolen_node_free(const struct intel_stolen_node *node); #endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h index b8269391bc69..3e79a74ff7de 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h @@ -12,7 +12,6 @@ #include <drm/drm_drv.h> -#include "xe_device.h" /* for xe_device_has_flat_ccs() */ #include "xe_device_types.h" static inline struct drm_i915_private *to_i915(const struct drm_device *dev) @@ -35,7 +34,4 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev) #define IS_MOBILE(xe) (xe && 0) -#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe)) -#define HAS_128_BYTE_Y_TILING(xe) (xe || 1) - #endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h deleted file mode 100644 index c11130440d31..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* Copyright © 2025 Intel Corporation */ - -#ifndef __I915_SCHEDULER_TYPES_H__ -#define __I915_SCHEDULER_TYPES_H__ - -#define I915_PRIORITY_DISPLAY 0 - -struct i915_sched_attr { - int priority; -}; - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h index 1d7c4360e5c0..bcd441dc0fce 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h @@ -3,4 +3,11 @@ * Copyright © 2023 Intel Corporation */ -#include "../../i915/i915_utils.h" +/* for soc/ */ +#ifndef MISSING_CASE +#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ + __stringify(x), (long)(x)) +#endif + +/* for a couple of users under i915/display */ +#define i915_inject_probe_failure(unused) ((unused) && 0) diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h index 4465c40f8134..b17e3bab23d5 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h @@ -26,8 +26,6 @@ struct i915_vma { struct xe_ggtt_node *node; }; -#define i915_ggtt_clear_scanout(bo) do { } while (0) - #define 
i915_vma_fence_id(vma) -1 static inline u32 i915_ggtt_offset(const struct i915_vma *vma) diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h index d012f02bc84f..d93ddacdf743 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h @@ -91,27 +91,6 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore, return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set); } -static inline int intel_wait_for_register(struct intel_uncore *uncore, - i915_reg_t i915_reg, u32 mask, - u32 value, unsigned int timeout) -{ - struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - - return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value, - timeout * USEC_PER_MSEC, NULL, false); -} - -static inline int intel_wait_for_register_fw(struct intel_uncore *uncore, - i915_reg_t i915_reg, u32 mask, - u32 value, unsigned int timeout, - u32 *out_value) -{ - struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - - return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value, - timeout * USEC_PER_MSEC, out_value, false); -} - static inline int __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg, u32 mask, u32 value, unsigned int fast_timeout_us, @@ -133,6 +112,16 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg, out_value, atomic); } +static inline int +__intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t i915_reg, + u32 mask, u32 value, unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, u32 *out_value) +{ + return __intel_wait_for_register(uncore, i915_reg, mask, value, + fast_timeout_us, slow_timeout_ms, + out_value); +} + static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore, i915_reg_t i915_reg) { diff --git a/drivers/gpu/drm/xe/display/ext/i915_utils.c b/drivers/gpu/drm/xe/display/ext/i915_utils.c deleted file mode 100644 index 1421c2a7b64d..000000000000 --- a/drivers/gpu/drm/xe/display/ext/i915_utils.c +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2023 Intel Corporation - */ - -#include "i915_drv.h" -#include "i915_utils.h" - -bool i915_vtd_active(struct drm_i915_private *i915) -{ - if (device_iommu_mapped(i915->drm.dev)) - return true; - - /* Running as a guest, we assume the host is enforcing VT'd */ - return i915_run_as_guest(); -} - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) - -/* i915 specific, just put here for shutting it up */ -int __i915_inject_probe_error(struct drm_i915_private *i915, int err, - const char *func, int line) -{ - return 0; -} - -#endif diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c index 27437c22bd70..bad2243b9114 100644 --- a/drivers/gpu/drm/xe/display/intel_bo.c +++ b/drivers/gpu/drm/xe/display/intel_bo.c @@ -5,6 +5,7 @@ #include "xe_bo.h" #include "intel_bo.h" +#include "intel_frontbuffer.h" bool intel_bo_is_tiled(struct drm_gem_object *obj) { @@ -28,10 +29,6 @@ bool intel_bo_is_protected(struct drm_gem_object *obj) return xe_bo_is_protected(gem_to_xe_bo(obj)); } -void intel_bo_flush_if_display(struct drm_gem_object *obj) -{ -} - int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { return drm_gem_prime_mmap(obj, vma); @@ -44,15 +41,60 @@ int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, i return xe_bo_read(bo, offset, dst, size); } -struct intel_frontbuffer 
*intel_bo_get_frontbuffer(struct drm_gem_object *obj) +struct xe_frontbuffer { + struct intel_frontbuffer base; + struct drm_gem_object *obj; + struct kref ref; +}; + +struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj) +{ + struct xe_frontbuffer *front; + + front = kmalloc(sizeof(*front), GFP_KERNEL); + if (!front) + return NULL; + + intel_frontbuffer_init(&front->base, obj->dev); + + kref_init(&front->ref); + + drm_gem_object_get(obj); + front->obj = obj; + + return &front->base; +} + +void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front) { - return NULL; + struct xe_frontbuffer *front = + container_of(_front, typeof(*front), base); + + kref_get(&front->ref); +} + +static void frontbuffer_release(struct kref *ref) +{ + struct xe_frontbuffer *front = + container_of(ref, typeof(*front), ref); + + intel_frontbuffer_fini(&front->base); + + drm_gem_object_put(front->obj); + + kfree(front); +} + +void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front) +{ + struct xe_frontbuffer *front = + container_of(_front, typeof(*front), base); + + kref_put(&front->ref, frontbuffer_release); } -struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, - struct intel_frontbuffer *front) +void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front) { - return front; } void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c index ebdb22c9499d..db8b1a27b4de 100644 --- a/drivers/gpu/drm/xe/display/intel_fb_bo.c +++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c @@ -24,8 +24,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj) xe_bo_put(bo); } -int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb, - struct drm_gem_object *obj, +int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { struct xe_bo *bo = gem_to_xe_bo(obj); diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c index 8ea9a472113c..7ad76022cb14 100644 --- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c @@ -3,45 +3,34 @@ * Copyright © 2023 Intel Corporation */ -#include <drm/drm_fb_helper.h> +#include <linux/fb.h> -#include "intel_display_core.h" -#include "intel_display_types.h" -#include "intel_fb.h" #include "intel_fbdev_fb.h" #include "xe_bo.h" #include "xe_ttm_stolen_mgr.h" #include "xe_wa.h" -#include <generated/xe_wa_oob.h> +#include <generated/xe_device_wa_oob.h> -struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +/* + * FIXME: There shouldn't be any reason to have XE_PAGE_SIZE stride + * alignment. The same 64 as i915 uses should be fine, and we shouldn't need to + * have driver specific values. However, dropping the stride alignment to 64 + * leads to underflowing the bo pin count in the atomic cleanup work. 
+ */ +u32 intel_fbdev_fb_pitch_align(u32 stride) { - struct drm_framebuffer *fb; - struct drm_device *dev = helper->dev; - struct xe_device *xe = to_xe_device(dev); - struct drm_mode_fb_cmd2 mode_cmd = {}; - struct xe_bo *obj; - int size; - - /* we don't do packed 24bpp */ - if (sizes->surface_bpp == 24) - sizes->surface_bpp = 32; - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; + return ALIGN(stride, XE_PAGE_SIZE); +} - mode_cmd.pitches[0] = ALIGN(mode_cmd.width * - DIV_ROUND_UP(sizes->surface_bpp, 8), XE_PAGE_SIZE); - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); +struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size) +{ + struct xe_device *xe = to_xe_device(drm); + struct xe_bo *obj; - size = mode_cmd.pitches[0] * mode_cmd.height; - size = PAGE_ALIGN(size); obj = ERR_PTR(-ENODEV); - if (!IS_DGFX(xe) && !XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display)) { + if (!IS_DGFX(xe) && !XE_DEVICE_WA(xe, 22019338487_display)) { obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), size, ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | @@ -62,33 +51,22 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, if (IS_ERR(obj)) { drm_err(&xe->drm, "failed to allocate framebuffer (%pe)\n", obj); - fb = ERR_PTR(-ENOMEM); - goto err; - } - - fb = intel_framebuffer_create(&obj->ttm.base, - drm_get_format_info(dev, - mode_cmd.pixel_format, - mode_cmd.modifier[0]), - &mode_cmd); - if (IS_ERR(fb)) { - xe_bo_unpin_map_no_vm(obj); - goto err; + return ERR_PTR(-ENOMEM); } - drm_gem_object_put(&obj->ttm.base); - - return to_intel_framebuffer(fb); + return &obj->ttm.base; +} -err: - return ERR_CAST(fb); +void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj) +{ + xe_bo_unpin_map_no_vm(gem_to_xe_bo(obj)); } -int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info, +int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info, struct drm_gem_object *_obj, struct i915_vma *vma) { struct xe_bo *obj = gem_to_xe_bo(_obj); - struct pci_dev *pdev = to_pci_dev(display->drm->dev); + struct pci_dev *pdev = to_pci_dev(drm->dev); if (!(obj->flags & XE_BO_FLAG_SYSTEM)) { if (obj->flags & XE_BO_FLAG_STOLEN) diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 19e691fccf8c..8b0afa270216 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -13,6 +13,8 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> +#include <drm/intel/display_member.h> +#include <drm/intel/display_parent_interface.h> #include <uapi/drm/xe_drm.h> #include "soc/intel_dram.h" @@ -33,8 +35,12 @@ #include "intel_hotplug.h" #include "intel_opregion.h" #include "skl_watermark.h" +#include "xe_display_rpm.h" #include "xe_module.h" +/* Ensure drm and display members are placed properly. 
*/ +INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct xe_device, drm, display); + /* Xe device functions */ /** @@ -223,15 +229,14 @@ void xe_display_irq_reset(struct xe_device *xe) gen11_display_irq_reset(display); } -void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) +void xe_display_irq_postinstall(struct xe_device *xe) { struct intel_display *display = xe->display; if (!xe->info.probe_display) return; - if (gt->info.id == XE_GT0) - gen11_de_irq_postinstall(display); + gen11_de_irq_postinstall(display); } static bool suspend_to_idle(void) @@ -324,7 +329,7 @@ void xe_display_pm_suspend(struct xe_device *xe) * properly. */ intel_power_domains_disable(display); - drm_client_dev_suspend(&xe->drm, false); + drm_client_dev_suspend(&xe->drm); if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(&xe->drm); @@ -356,7 +361,7 @@ void xe_display_pm_shutdown(struct xe_device *xe) return; intel_power_domains_disable(display); - drm_client_dev_suspend(&xe->drm, false); + drm_client_dev_suspend(&xe->drm); if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(&xe->drm); @@ -481,7 +486,7 @@ void xe_display_pm_resume(struct xe_device *xe) intel_opregion_resume(display); - drm_client_dev_resume(&xe->drm, false); + drm_client_dev_resume(&xe->drm); intel_power_domains_enable(display); } @@ -511,6 +516,10 @@ static void display_device_remove(struct drm_device *dev, void *arg) intel_display_device_remove(display); } +static const struct intel_display_parent_interface parent = { + .rpm = &xe_display_rpm_interface, +}; + /** * xe_display_probe - probe display and create display struct * @xe: XE device instance @@ -531,7 +540,7 @@ int xe_display_probe(struct xe_device *xe) if (!xe->info.probe_display) goto no_display; - display = intel_display_device_probe(pdev); + display = intel_display_device_probe(pdev, &parent); if (IS_ERR(display)) return PTR_ERR(display); diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h index e533aa4750bc..76db95c25f7e 100644 --- a/drivers/gpu/drm/xe/display/xe_display.h +++ b/drivers/gpu/drm/xe/display/xe_display.h @@ -26,7 +26,7 @@ void xe_display_unregister(struct xe_device *xe); void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl); void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir); void xe_display_irq_reset(struct xe_device *xe); -void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); +void xe_display_irq_postinstall(struct xe_device *xe); void xe_display_pm_suspend(struct xe_device *xe); void xe_display_pm_shutdown(struct xe_device *xe); @@ -55,7 +55,7 @@ static inline void xe_display_unregister(struct xe_device *xe) {} static inline void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) {} static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) {} static inline void xe_display_irq_reset(struct xe_device *xe) {} -static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} +static inline void xe_display_irq_postinstall(struct xe_device *xe) {} static inline void xe_display_pm_suspend(struct xe_device *xe) {} static inline void xe_display_pm_shutdown(struct xe_device *xe) {} diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c index 3825376e98cc..340f65884812 100644 --- a/drivers/gpu/drm/xe/display/xe_display_rpm.c +++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c @@ -1,73 +1,74 @@ // SPDX-License-Identifier: MIT /* Copyright © 2025 
Intel Corporation */ +#include <drm/intel/display_parent_interface.h> + #include "intel_display_core.h" #include "intel_display_rpm.h" #include "xe_device.h" #include "xe_device_types.h" #include "xe_pm.h" -static struct xe_device *display_to_xe(struct intel_display *display) -{ - return to_xe_device(display->drm); -} - -struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display) +static struct ref_tracker *xe_display_rpm_get(const struct drm_device *drm) { - return intel_display_rpm_get(display); + return xe_pm_runtime_resume_and_get(to_xe_device(drm)) ? INTEL_WAKEREF_DEF : NULL; } -void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref) +static struct ref_tracker *xe_display_rpm_get_if_in_use(const struct drm_device *drm) { - intel_display_rpm_put(display, wakeref); + return xe_pm_runtime_get_if_in_use(to_xe_device(drm)) ? INTEL_WAKEREF_DEF : NULL; } -struct ref_tracker *intel_display_rpm_get(struct intel_display *display) +static struct ref_tracker *xe_display_rpm_get_noresume(const struct drm_device *drm) { - return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL; -} - -struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display) -{ - return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL; -} - -struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display) -{ - xe_pm_runtime_get_noresume(display_to_xe(display)); + xe_pm_runtime_get_noresume(to_xe_device(drm)); return INTEL_WAKEREF_DEF; } -void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref) +static void xe_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref) { if (wakeref) - xe_pm_runtime_put(display_to_xe(display)); + xe_pm_runtime_put(to_xe_device(drm)); } -void intel_display_rpm_put_unchecked(struct intel_display *display) +static void xe_display_rpm_put_unchecked(const struct drm_device *drm) { - xe_pm_runtime_put(display_to_xe(display)); + xe_pm_runtime_put(to_xe_device(drm)); } -bool intel_display_rpm_suspended(struct intel_display *display) +static bool xe_display_rpm_suspended(const struct drm_device *drm) { - struct xe_device *xe = display_to_xe(display); + struct xe_device *xe = to_xe_device(drm); return pm_runtime_suspended(xe->drm.dev); } -void assert_display_rpm_held(struct intel_display *display) +static void xe_display_rpm_assert_held(const struct drm_device *drm) { /* FIXME */ } -void intel_display_rpm_assert_block(struct intel_display *display) +static void xe_display_rpm_assert_block(const struct drm_device *drm) { /* FIXME */ } -void intel_display_rpm_assert_unblock(struct intel_display *display) +static void xe_display_rpm_assert_unblock(const struct drm_device *drm) { /* FIXME */ } + +const struct intel_display_rpm_interface xe_display_rpm_interface = { + .get = xe_display_rpm_get, + .get_raw = xe_display_rpm_get, + .get_if_in_use = xe_display_rpm_get_if_in_use, + .get_noresume = xe_display_rpm_get_noresume, + .put = xe_display_rpm_put, + .put_raw = xe_display_rpm_put, + .put_unchecked = xe_display_rpm_put_unchecked, + .suspended = xe_display_rpm_suspended, + .assert_held = xe_display_rpm_assert_held, + .assert_block = xe_display_rpm_assert_block, + .assert_unblock = xe_display_rpm_assert_unblock +}; diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.h b/drivers/gpu/drm/xe/display/xe_display_rpm.h new file mode 100644 index 000000000000..0bf9d31e87c1 --- /dev/null +++ 
b/drivers/gpu/drm/xe/display/xe_display_rpm.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_DISPLAY_RPM_H_ +#define _XE_DISPLAY_RPM_H_ + +extern const struct intel_display_rpm_interface xe_display_rpm_interface; + +#endif /* _XE_DISPLAY_RPM_H_ */ diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c index 8ada1cbcb16c..2aa1b8c03411 100644 --- a/drivers/gpu/drm/xe/display/xe_display_wa.c +++ b/drivers/gpu/drm/xe/display/xe_display_wa.c @@ -13,6 +13,7 @@ bool intel_display_needs_wa_16023588340(struct intel_display *display) { struct xe_device *xe = to_xe_device(display->drm); + struct xe_gt *wa_gt = xe_root_mmio_gt(xe); - return XE_GT_WA(xe_root_mmio_gt(xe), 16023588340); + return wa_gt && XE_GT_WA(wa_gt, 16023588340); } diff --git a/drivers/gpu/drm/xe/display/xe_panic.c b/drivers/gpu/drm/xe/display/xe_panic.c index f32b23338331..df663286092a 100644 --- a/drivers/gpu/drm/xe/display/xe_panic.c +++ b/drivers/gpu/drm/xe/display/xe_panic.c @@ -8,20 +8,23 @@ #include "intel_fb.h" #include "intel_panic.h" #include "xe_bo.h" +#include "xe_res_cursor.h" struct intel_panic { - struct page **pages; + struct xe_res_cursor res; + struct iosys_map vmap; + int page; - void *vaddr; }; static void xe_panic_kunmap(struct intel_panic *panic) { - if (panic->vaddr) { - drm_clflush_virt_range(panic->vaddr, PAGE_SIZE); - kunmap_local(panic->vaddr); - panic->vaddr = NULL; + if (!panic->vmap.is_iomem && iosys_map_is_set(&panic->vmap)) { + drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE); + kunmap_local(panic->vmap.vaddr); } + iosys_map_clear(&panic->vmap); + panic->page = -1; } /* @@ -46,15 +49,29 @@ static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int new_page = offset >> PAGE_SHIFT; offset = offset % PAGE_SIZE; if (new_page != panic->page) { - xe_panic_kunmap(panic); + if (xe_bo_is_vram(bo)) { + /* Display is always mapped on root tile */ + struct xe_vram_region *vram = xe_bo_device(bo)->mem.vram; + + if (panic->page < 0 || new_page < panic->page) { + xe_res_first(bo->ttm.resource, new_page * PAGE_SIZE, + bo->ttm.base.size - new_page * PAGE_SIZE, &panic->res); + } else { + xe_res_next(&panic->res, PAGE_SIZE * (new_page - panic->page)); + } + iosys_map_set_vaddr_iomem(&panic->vmap, + vram->mapping + panic->res.start); + } else { + xe_panic_kunmap(panic); + iosys_map_set_vaddr(&panic->vmap, + ttm_bo_kmap_try_from_panic(&bo->ttm, + new_page)); + } panic->page = new_page; - panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm, - panic->page); - } - if (panic->vaddr) { - u32 *pix = panic->vaddr + offset; - *pix = color; } + + if (iosys_map_is_set(&panic->vmap)) + iosys_map_wr(&panic->vmap, offset, u32, color); } struct intel_panic *intel_panic_alloc(void) @@ -68,6 +85,12 @@ struct intel_panic *intel_panic_alloc(void) int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb) { + struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; + struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base)); + + if (xe_bo_is_vram(bo) && !xe_bo_is_visible_vram(bo)) + return -ENODEV; + panic->page = -1; sb->set_pixel = xe_panic_page_set_pixel; return 0; @@ -76,5 +99,4 @@ int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb) void intel_panic_finish(struct intel_panic *panic) { xe_panic_kunmap(panic); - panic->page = -1; } diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index 
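/*
 * Editor's note on the xe_panic changes above: for VRAM-backed framebuffers
 * the scanout pages are reached through the root tile's BAR mapping
 * (iosys_map_set_vaddr_iomem() on vram->mapping + panic->res.start), so no
 * unmap is required; xe_panic_kunmap() only has work to do on the system
 * memory path, where pages are mapped one at a time via
 * ttm_bo_kmap_try_from_panic()/kmap_local.
 */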
94f00def811b..12d25c5290fd 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -25,7 +25,7 @@ #include "xe_vram_types.h" #include "xe_wa.h" -#include <generated/xe_wa_oob.h> +#include <generated/xe_device_wa_oob.h> void intel_plane_initial_vblank_wait(struct intel_crtc *crtc) { @@ -123,7 +123,7 @@ initial_plane_bo(struct xe_device *xe, phys_base = base; flags |= XE_BO_FLAG_STOLEN; - if (XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display)) + if (XE_DEVICE_WA(xe, 22019338487_display)) return NULL; /* diff --git a/drivers/gpu/drm/xe/display/xe_stolen.c b/drivers/gpu/drm/xe/display/xe_stolen.c new file mode 100644 index 000000000000..9f04ba36e930 --- /dev/null +++ b/drivers/gpu/drm/xe/display/xe_stolen.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include "gem/i915_gem_stolen.h" +#include "xe_res_cursor.h" +#include "xe_ttm_stolen_mgr.h" +#include "xe_validation.h" + +struct intel_stolen_node { + struct xe_device *xe; + struct xe_bo *bo; +}; + +int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size, + unsigned int align, u64 start, u64 end) +{ + struct xe_device *xe = node->xe; + + struct xe_bo *bo; + int err = 0; + u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN; + + if (start < SZ_4K) + start = SZ_4K; + + if (align) { + size = ALIGN(size, align); + start = ALIGN(start, align); + } + + bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe), + size, start, end, ttm_bo_type_kernel, flags); + if (IS_ERR(bo)) { + err = PTR_ERR(bo); + bo = NULL; + return err; + } + + node->bo = bo; + + return err; +} + +int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size, unsigned int align) +{ + /* Not used on xe */ + WARN_ON(1); + + return -ENODEV; +} + +void i915_gem_stolen_remove_node(struct intel_stolen_node *node) +{ + xe_bo_unpin_map_no_vm(node->bo); + node->bo = NULL; +} + +bool i915_gem_stolen_initialized(struct drm_device *drm) +{ + struct xe_device *xe = to_xe_device(drm); + + return ttm_manager_type(&xe->ttm, XE_PL_STOLEN); +} + +bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node) +{ + return node->bo; +} + +u32 i915_gem_stolen_node_offset(struct intel_stolen_node *node) +{ + struct xe_res_cursor res; + + xe_res_first(node->bo->ttm.resource, 0, 4096, &res); + return res.start; +} + +/* Used for < gen4. These are not supported by Xe */ +u64 i915_gem_stolen_area_address(struct drm_device *drm) +{ + WARN_ON(1); + + return 0; +} + +/* Used for gen9 specific WA. 
Gen9 is not supported by Xe */ +u64 i915_gem_stolen_area_size(struct drm_device *drm) +{ + WARN_ON(1); + + return 0; +} + +u64 i915_gem_stolen_node_address(struct intel_stolen_node *node) +{ + struct xe_device *xe = node->xe; + + return xe_ttm_stolen_gpu_offset(xe) + i915_gem_stolen_node_offset(node); +} + +u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node) +{ + return node->bo->ttm.base.size; +} + +struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm) +{ + struct xe_device *xe = to_xe_device(drm); + struct intel_stolen_node *node; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return NULL; + + node->xe = xe; + + return node; +} + +void i915_gem_stolen_node_free(const struct intel_stolen_node *node) +{ + kfree(node); +} diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h index 8cfcd3360896..5d41ca297447 100644 --- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h @@ -31,6 +31,12 @@ #define XY_FAST_COPY_BLT_D1_DST_TILE4 REG_BIT(30) #define XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK GENMASK(23, 20) +#define MEM_COPY_CMD (2 << 29 | 0x5a << 22 | 0x8) +#define MEM_COPY_PAGE_COPY_MODE REG_BIT(19) +#define MEM_COPY_MATRIX_COPY REG_BIT(17) +#define MEM_COPY_SRC_MOCS_INDEX_MASK GENMASK(31, 28) +#define MEM_COPY_DST_MOCS_INDEX_MASK GENMASK(6, 3) + #define PVC_MEM_SET_CMD (2 << 29 | 0x5b << 22) #define PVC_MEM_SET_CMD_LEN_DW 7 #define PVC_MEM_SET_MATRIX REG_BIT(17) diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index f4c3e1187a00..68172b0248a6 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -141,6 +141,8 @@ #define INHIBIT_SWITCH_UNTIL_PREEMPTED REG_BIT(31) #define IDLE_DELAY REG_GENMASK(20, 0) +#define RING_CURRENT_LRCA(base) XE_REG((base) + 0x240) + #define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED) #define CTX_CTRL_PXP_ENABLE REG_BIT(10) #define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8) @@ -153,6 +155,8 @@ #define GFX_DISABLE_LEGACY_MODE REG_BIT(3) #define GFX_MSIX_INTERRUPT_ENABLE REG_BIT(13) +#define RING_CSMQDEBUG(base) XE_REG((base) + 0x2b0) + #define RING_TIMESTAMP(base) XE_REG((base) + 0x358) #define RING_TIMESTAMP_UDW(base) XE_REG((base) + 0x358 + 4) diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 51f2a03847f9..917a088c28f2 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -37,6 +37,12 @@ #define GMD_ID XE_REG(0xd8c) #define GMD_ID_ARCH_MASK REG_GENMASK(31, 22) #define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14) +/* + * Spec defines these bits as "Reserved", but then make them assume some + * meaning that depends on the ARCH. To avoid any confusion, call them + * SUBIP_FLAG_MASK. 
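/*
 * Editor's sketch (illustration only): a display-side consumer of the stolen
 * memory shim above would allocate a node and read back its DSM-relative
 * offset roughly as follows.  example_stolen_roundtrip() is a made-up name;
 * every i915_gem_stolen_*() call is taken from the implementations above.
 */
static int example_stolen_roundtrip(struct drm_device *drm, u64 size)
{
	struct intel_stolen_node *node;
	int err;

	if (!i915_gem_stolen_initialized(drm))
		return -ENODEV;

	node = i915_gem_stolen_node_alloc(drm);
	if (!node)
		return -ENOMEM;

	/* anywhere in stolen, 4 KiB aligned; backed by a pinned stolen BO */
	err = i915_gem_stolen_insert_node_in_range(node, size, SZ_4K, 0, U64_MAX);
	if (!err) {
		drm_dbg(drm, "stolen node at offset %#x, size %#llx\n",
			i915_gem_stolen_node_offset(node),
			i915_gem_stolen_node_size(node));
		i915_gem_stolen_remove_node(node);
	}

	i915_gem_stolen_node_free(node);
	return err;
}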
+ */ +#define GMD_ID_SUBIP_FLAG_MASK REG_GENMASK(13, 6) #define GMD_ID_REVID REG_GENMASK(5, 0) #define FORCEWAKE_ACK_GSC XE_REG(0xdf8) @@ -95,7 +101,6 @@ #define XE2_LMEM_CFG XE_REG(0x48b0) -#define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4) #define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910) #define XEHP_FLAT_CCS_PTR REG_GENMASK(31, 8) @@ -168,6 +173,7 @@ #define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED) #define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14) +#define FAST_CLEAR_VALIGN_FIX REG_BIT(13) #define XE2LPM_CCCHKNREG1 XE_REG(0x82a8) @@ -239,6 +245,9 @@ #define XE2_GT_GEOMETRY_DSS_1 XE_REG(0x9150) #define XE2_GT_GEOMETRY_DSS_2 XE_REG(0x9154) +#define SERVICE_COPY_ENABLE XE_REG(0x9170) +#define FUSE_SERVICE_COPY_ENABLE_MASK REG_GENMASK(7, 0) + #define GDRST XE_REG(0x941c) #define GRDOM_GUC REG_BIT(3) #define GRDOM_FULL REG_BIT(0) @@ -346,10 +355,6 @@ #define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n)) #define VDN_MFXVDENC_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n)) -#define CTC_MODE XE_REG(0xa26c) -#define CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1) -#define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0) - #define FORCEWAKE_RENDER XE_REG(0xa278) #define POWERGATE_DOMAIN_STATUS XE_REG(0xa2a0) @@ -545,6 +550,9 @@ #define SARB_CHICKEN1 XE_REG_MCR(0xe90c) #define COMP_CKN_IN REG_GENMASK(30, 29) +#define MAIN_GAMCTRL_MODE XE_REG(0xef00) +#define MAIN_GAMCTRL_QUEUE_SELECT REG_BIT(0) + #define RCU_MODE XE_REG(0x14800, XE_REG_OPTION_MASKED) #define RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1) #define RCU_MODE_CCS_ENABLE REG_BIT(0) @@ -581,6 +589,7 @@ #define GT_GFX_RC6 XE_REG(0x138108) #define GT0_PERF_LIMIT_REASONS XE_REG(0x1381a8) +/* Common performance limit reason bits - available on all platforms */ #define GT0_PERF_LIMIT_REASONS_MASK 0xde3 #define PROCHOT_MASK REG_BIT(0) #define THERMAL_LIMIT_MASK REG_BIT(1) @@ -590,6 +599,18 @@ #define POWER_LIMIT_4_MASK REG_BIT(8) #define POWER_LIMIT_1_MASK REG_BIT(10) #define POWER_LIMIT_2_MASK REG_BIT(11) +/* Platform-specific performance limit reason bits - for Crescent Island */ +#define CRI_PERF_LIMIT_REASONS_MASK 0xfdff +#define SOC_THERMAL_LIMIT_MASK REG_BIT(1) +#define MEM_THERMAL_MASK REG_BIT(2) +#define VR_THERMAL_MASK REG_BIT(3) +#define ICCMAX_MASK REG_BIT(4) +#define SOC_AVG_THERMAL_MASK REG_BIT(6) +#define FASTVMODE_MASK REG_BIT(7) +#define PSYS_PL1_MASK REG_BIT(12) +#define PSYS_PL2_MASK REG_BIT(13) +#define P0_FREQ_MASK REG_BIT(14) +#define PSYS_CRIT_MASK REG_BIT(15) #define GT_PERF_STATUS XE_REG(0x1381b4) #define VOLTAGE_MASK REG_GENMASK(10, 0) diff --git a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h index af781c8e4a80..f2e455e2bfe4 100644 --- a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h @@ -14,6 +14,9 @@ #define REG_SG_REMAP_ADDR_PREFIX XE_REG(SOC_BASE + 0x0164) #define REG_SG_REMAP_ADDR_POSTFIX XE_REG(SOC_BASE + 0x0168) +#define I2C_BRIDGE_PCICFGCTL XE_REG(I2C_BRIDGE_OFFSET + 0x200) +#define ACPI_INTR_EN REG_BIT(1) + #define I2C_CONFIG_CMD XE_REG(I2C_CONFIG_SPACE_OFFSET + PCI_COMMAND) #define I2C_CONFIG_PMCSR XE_REG(I2C_CONFIG_SPACE_OFFSET + 0x84) diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h index 7c2a3a140142..2f97662d958d 100644 --- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h @@ -65,7 +65,10 @@ #define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF) #define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF) #define 
VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF) +#define VCS4_VCS5_INTR_MASK XE_REG(0x1900b0, XE_REG_OPTION_VF) +#define VCS6_VCS7_INTR_MASK XE_REG(0x1900b4, XE_REG_OPTION_VF) #define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF) +#define VECS2_VECS3_INTR_MASK XE_REG(0x1900d4, XE_REG_OPTION_VF) #define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4) #define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF) #define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF) @@ -80,9 +83,10 @@ #define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) #define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8) #define GSC_ER_COMPLETE REG_BIT(5) -#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4) +#define GT_FLUSH_COMPLETE_INTERRUPT REG_BIT(4) #define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3) -#define GT_RENDER_USER_INTERRUPT REG_BIT(0) +#define GT_COMPUTE_WALKER_INTERRUPT REG_BIT(2) +#define GT_MI_USER_INTERRUPT REG_BIT(0) /* irqs for OTHER_KCR_INSTANCE */ #define KCR_PXP_STATE_TERMINATED_INTERRUPT REG_BIT(1) diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h index 264e9baf949c..0f79c0714454 100644 --- a/drivers/gpu/drm/xe/regs/xe_pmt.h +++ b/drivers/gpu/drm/xe/regs/xe_pmt.h @@ -24,6 +24,7 @@ #define BMG_MODS_RESIDENCY_OFFSET (0x4D0) #define BMG_G2_RESIDENCY_OFFSET (0x530) #define BMG_G6_RESIDENCY_OFFSET (0x538) +#define BMG_G7_RESIDENCY_OFFSET (0x4B0) #define BMG_G8_RESIDENCY_OFFSET (0x540) #define BMG_G10_RESIDENCY_OFFSET (0x548) diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h index 1926b4044314..ad93c57edd17 100644 --- a/drivers/gpu/drm/xe/regs/xe_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_regs.h @@ -40,6 +40,8 @@ #define STOLEN_RESERVED XE_REG(0x1082c0) #define WOPCM_SIZE_MASK REG_GENMASK64(9, 7) +#define SG_TILE_ADDR_RANGE(_idx) XE_REG(0x1083a0 + (_idx) * 4) + #define MTL_RP_STATE_CAP XE_REG(0x138000) #define MTL_GT_RPA_FREQUENCY XE_REG(0x138008) diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c index a7e548a2bdfb..5df98de5ba3c 100644 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -31,6 +31,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported, struct drm_exec *exec) { struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv); + struct dma_buf_attachment *attach; u32 mem_type; int ret; @@ -46,7 +47,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported, mem_type = XE_PL_TT; else if (params->force_different_devices && !is_dynamic(params) && (params->mem_mask & XE_BO_FLAG_SYSTEM)) - /* Pin migrated to TT */ + /* Pin migrated to TT on non-dynamic attachments. */ mem_type = XE_PL_TT; if (!xe_bo_is_mem_type(exported, mem_type)) { @@ -88,6 +89,18 @@ static void check_residency(struct kunit *test, struct xe_bo *exported, KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type)); + /* Check that we can pin without migrating. 
*/ + attach = list_first_entry_or_null(&dmabuf->attachments, typeof(*attach), node); + if (attach) { + int err = dma_buf_pin(attach); + + if (!err) { + KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type)); + dma_buf_unpin(attach); + } + KUNIT_EXPECT_EQ(test, err, 0); + } + if (params->force_different_devices) KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT)); else @@ -150,7 +163,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe) xe_bo_lock(import_bo, false); err = xe_bo_validate(import_bo, NULL, false, exec); - /* Pinning in VRAM is not allowed. */ + /* Pinning in VRAM is not allowed for non-dynamic attachments */ if (!is_dynamic(params) && params->force_different_devices && !(params->mem_mask & XE_BO_FLAG_SYSTEM)) diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c new file mode 100644 index 000000000000..42bfc4bcfbcf --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <kunit/static_stub.h> +#include <kunit/test.h> +#include <kunit/test-bug.h> + +#include "xe_kunit_helpers.h" +#include "xe_pci_test.h" + +#define TEST_MAX_VFS 63 + +static void pf_set_admin_mode(struct xe_device *xe, bool enable) +{ + /* should match logic of xe_sriov_pf_admin_only() */ + xe->info.probe_display = !enable; + KUNIT_EXPECT_EQ(kunit_get_current_test(), enable, xe_sriov_pf_admin_only(xe)); +} + +static const void *num_vfs_gen_param(struct kunit *test, const void *prev, char *desc) +{ + unsigned long next = 1 + (unsigned long)prev; + + if (next > TEST_MAX_VFS) + return NULL; + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%lu VF%s", + next, str_plural(next)); + return (void *)next; +} + +static int pf_gt_config_test_init(struct kunit *test) +{ + struct xe_pci_fake_data fake = { + .sriov_mode = XE_SRIOV_MODE_PF, + .platform = XE_TIGERLAKE, /* any random platform with SR-IOV */ + .subplatform = XE_SUBPLATFORM_NONE, + }; + struct xe_device *xe; + struct xe_gt *gt; + + test->priv = &fake; + xe_kunit_helper_xe_device_test_init(test); + + xe = test->priv; + KUNIT_ASSERT_TRUE(test, IS_SRIOV_PF(xe)); + + gt = xe_root_mmio_gt(xe); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt); + test->priv = gt; + + /* pretend it can support up to 63 VFs */ + xe->sriov.pf.device_total_vfs = TEST_MAX_VFS; + xe->sriov.pf.driver_max_vfs = TEST_MAX_VFS; + KUNIT_ASSERT_EQ(test, xe_sriov_pf_get_totalvfs(xe), 63); + + pf_set_admin_mode(xe, false); + KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0); + + /* more sanity checks */ + KUNIT_EXPECT_EQ(test, GUC_ID_MAX + 1, SZ_64K); + KUNIT_EXPECT_EQ(test, GUC_NUM_DOORBELLS, SZ_256); + + return 0; +} + +static void fair_contexts_1vf(struct kunit *test) +{ + struct xe_gt *gt = test->priv; + struct xe_device *xe = gt_to_xe(gt); + + pf_set_admin_mode(xe, false); + KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe)); + KUNIT_EXPECT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, 1)); + + pf_set_admin_mode(xe, true); + KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe)); + KUNIT_EXPECT_EQ(test, SZ_64K - SZ_1K, pf_profile_fair_ctxs(gt, 1)); +} + +static void fair_contexts(struct kunit *test) +{ + unsigned int num_vfs = (unsigned long)test->param_value; + struct xe_gt *gt = test->priv; + struct xe_device *xe = gt_to_xe(gt); + + pf_set_admin_mode(xe, false); + KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe)); + + KUNIT_EXPECT_TRUE(test, 
is_power_of_2(pf_profile_fair_ctxs(gt, num_vfs))); + KUNIT_EXPECT_GT(test, GUC_ID_MAX, num_vfs * pf_profile_fair_ctxs(gt, num_vfs)); + + if (num_vfs > 31) + KUNIT_ASSERT_EQ(test, SZ_1K, pf_profile_fair_ctxs(gt, num_vfs)); + else if (num_vfs > 15) + KUNIT_ASSERT_EQ(test, SZ_2K, pf_profile_fair_ctxs(gt, num_vfs)); + else if (num_vfs > 7) + KUNIT_ASSERT_EQ(test, SZ_4K, pf_profile_fair_ctxs(gt, num_vfs)); + else if (num_vfs > 3) + KUNIT_ASSERT_EQ(test, SZ_8K, pf_profile_fair_ctxs(gt, num_vfs)); + else if (num_vfs > 1) + KUNIT_ASSERT_EQ(test, SZ_16K, pf_profile_fair_ctxs(gt, num_vfs)); + else + KUNIT_ASSERT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, num_vfs)); +} + +static void fair_doorbells_1vf(struct kunit *test) +{ + struct xe_gt *gt = test->priv; + struct xe_device *xe = gt_to_xe(gt); + + pf_set_admin_mode(xe, false); + KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe)); + KUNIT_EXPECT_EQ(test, 128, pf_profile_fair_dbs(gt, 1)); + + pf_set_admin_mode(xe, true); + KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe)); + KUNIT_EXPECT_EQ(test, 240, pf_profile_fair_dbs(gt, 1)); +} + +static void fair_doorbells(struct kunit *test) +{ + unsigned int num_vfs = (unsigned long)test->param_value; + struct xe_gt *gt = test->priv; + struct xe_device *xe = gt_to_xe(gt); + + pf_set_admin_mode(xe, false); + KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe)); + + KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_dbs(gt, num_vfs))); + KUNIT_EXPECT_GE(test, GUC_NUM_DOORBELLS, (num_vfs + 1) * pf_profile_fair_dbs(gt, num_vfs)); + + if (num_vfs > 31) + KUNIT_ASSERT_EQ(test, SZ_4, pf_profile_fair_dbs(gt, num_vfs)); + else if (num_vfs > 15) + KUNIT_ASSERT_EQ(test, SZ_8, pf_profile_fair_dbs(gt, num_vfs)); + else if (num_vfs > 7) + KUNIT_ASSERT_EQ(test, SZ_16, pf_profile_fair_dbs(gt, num_vfs)); + else if (num_vfs > 3) + KUNIT_ASSERT_EQ(test, SZ_32, pf_profile_fair_dbs(gt, num_vfs)); + else if (num_vfs > 1) + KUNIT_ASSERT_EQ(test, SZ_64, pf_profile_fair_dbs(gt, num_vfs)); + else + KUNIT_ASSERT_EQ(test, SZ_128, pf_profile_fair_dbs(gt, num_vfs)); +} + +static void fair_ggtt_1vf(struct kunit *test) +{ + struct xe_gt *gt = test->priv; + struct xe_device *xe = gt_to_xe(gt); + + pf_set_admin_mode(xe, false); + KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe)); + KUNIT_EXPECT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, 1)); + + pf_set_admin_mode(xe, true); + KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe)); + KUNIT_EXPECT_EQ(test, SZ_2G + SZ_1G + SZ_512M, pf_profile_fair_ggtt(gt, 1)); +} + +static void fair_ggtt(struct kunit *test) +{ + unsigned int num_vfs = (unsigned long)test->param_value; + struct xe_gt *gt = test->priv; + struct xe_device *xe = gt_to_xe(gt); + u64 alignment = pf_get_ggtt_alignment(gt); + u64 shareable = SZ_2G + SZ_1G + SZ_512M; + + pf_set_admin_mode(xe, false); + KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe)); + + KUNIT_EXPECT_TRUE(test, IS_ALIGNED(pf_profile_fair_ggtt(gt, num_vfs), alignment)); + KUNIT_EXPECT_GE(test, shareable, num_vfs * pf_profile_fair_ggtt(gt, num_vfs)); + + if (num_vfs > 56) + KUNIT_ASSERT_EQ(test, SZ_64M - SZ_8M, pf_profile_fair_ggtt(gt, num_vfs)); + else if (num_vfs > 28) + KUNIT_ASSERT_EQ(test, SZ_64M, pf_profile_fair_ggtt(gt, num_vfs)); + else if (num_vfs > 14) + KUNIT_ASSERT_EQ(test, SZ_128M, pf_profile_fair_ggtt(gt, num_vfs)); + else if (num_vfs > 7) + KUNIT_ASSERT_EQ(test, SZ_256M, pf_profile_fair_ggtt(gt, num_vfs)); + else if (num_vfs > 3) + KUNIT_ASSERT_EQ(test, SZ_512M, pf_profile_fair_ggtt(gt, num_vfs)); + else if (num_vfs > 1) + KUNIT_ASSERT_EQ(test, 
SZ_1G, pf_profile_fair_ggtt(gt, num_vfs)); + else + KUNIT_ASSERT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, num_vfs)); +} + +static struct kunit_case pf_gt_config_test_cases[] = { + KUNIT_CASE(fair_contexts_1vf), + KUNIT_CASE(fair_doorbells_1vf), + KUNIT_CASE(fair_ggtt_1vf), + KUNIT_CASE_PARAM(fair_contexts, num_vfs_gen_param), + KUNIT_CASE_PARAM(fair_doorbells, num_vfs_gen_param), + KUNIT_CASE_PARAM(fair_ggtt, num_vfs_gen_param), + {} +}; + +static struct kunit_suite pf_gt_config_suite = { + .name = "pf_gt_config", + .test_cases = pf_gt_config_test_cases, + .init = pf_gt_config_test_init, +}; + +kunit_test_suite(pf_gt_config_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index 0e502feaca81..6bb278167aaf 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -49,7 +49,7 @@ static void read_l3cc_table(struct xe_gt *gt, fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { xe_force_wake_put(gt_to_fw(gt), fw_ref); - KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n"); + KUNIT_FAIL_AND_ABORT(test, "Forcewake Failed.\n"); } for (i = 0; i < info->num_mocs_regs; i++) { diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c index 663a79ec960d..f3179b31f13e 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci.c +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -311,8 +311,8 @@ const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc } EXPORT_SYMBOL_IF_KUNIT(xe_pci_id_gen_param); -static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, - u32 *ver, u32 *revid) +static int fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, + u32 *ver, u32 *revid) { struct kunit *test = kunit_get_current_test(); struct xe_pci_fake_data *data = test->priv; @@ -324,6 +324,8 @@ static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, *ver = data->graphics_verx100; *revid = xe_step_to_gmdid(data->step.graphics); } + + return 0; } static void fake_xe_info_probe_tile_count(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c index 37b344df2dc3..4d10a7e2b570 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.c +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c @@ -44,21 +44,27 @@ static void check_media_ip(struct kunit *test) KUNIT_ASSERT_EQ(test, mask, 0); } -static void check_platform_gt_count(struct kunit *test) +static void check_platform_desc(struct kunit *test) { const struct pci_device_id *pci = test->param_value; const struct xe_device_desc *desc = (const struct xe_device_desc *)pci->driver_data; - int max_gt = desc->max_gt_per_tile; - KUNIT_ASSERT_GT(test, max_gt, 0); - KUNIT_ASSERT_LE(test, max_gt, XE_MAX_GT_PER_TILE); + KUNIT_EXPECT_GT(test, desc->dma_mask_size, 0); + + KUNIT_EXPECT_GT(test, (unsigned int)desc->max_gt_per_tile, 0); + KUNIT_EXPECT_LE(test, (unsigned int)desc->max_gt_per_tile, XE_MAX_GT_PER_TILE); + + KUNIT_EXPECT_GT(test, desc->va_bits, 0); + KUNIT_EXPECT_LE(test, desc->va_bits, 64); + + KUNIT_EXPECT_GT(test, desc->vm_max_level, 0); } static struct kunit_case xe_pci_tests[] = { KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param), KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param), - KUNIT_CASE_PARAM(check_platform_gt_count, xe_pci_id_gen_param), + KUNIT_CASE_PARAM(check_platform_desc, xe_pci_id_gen_param), {} }; diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c 
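/*
 * Editor's sketch (assumption; the pf_profile_fair_*() helpers themselves are
 * not shown in this diff): the non-admin context and doorbell expectations
 * above are consistent with granting each VF the largest power-of-two share
 * that still leaves room for the PF.  example_fair_share() is a made-up name.
 */
static u32 example_fair_share(u32 total, unsigned int num_vfs)
{
	return rounddown_pow_of_two(total / (num_vfs + 1));
}
/*
 * e.g. example_fair_share(GUC_ID_MAX + 1, 1)    == SZ_32K (contexts, 1 VF)
 *      example_fair_share(GUC_NUM_DOORBELLS, 2) == SZ_64  (doorbells, 2 VFs)
 * The GGTT expectations above follow a different, alignment-based split.
 */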
b/drivers/gpu/drm/xe/tests/xe_rtp_test.c index b0254b014fe4..d2255a59e58f 100644 --- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c +++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c @@ -48,12 +48,14 @@ struct rtp_test_case { const struct xe_rtp_entry *entries; }; -static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe) +static bool match_yes(const struct xe_device *xe, const struct xe_gt *gt, + const struct xe_hw_engine *hwe) { return true; } -static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe) +static bool match_no(const struct xe_device *xe, const struct xe_gt *gt, + const struct xe_hw_engine *hwe) { return false; } diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 4410e28dee54..b0bd31d14bb9 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -9,6 +9,7 @@ #include <linux/nospec.h> #include <drm/drm_drv.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_managed.h> #include <drm/ttm/ttm_backup.h> @@ -34,6 +35,7 @@ #include "xe_res_cursor.h" #include "xe_shrinker.h" #include "xe_sriov_vf_ccs.h" +#include "xe_tile.h" #include "xe_trace_bo.h" #include "xe_ttm_stolen_mgr.h" #include "xe_vm.h" @@ -81,6 +83,10 @@ static struct ttm_placement tt_placement = { .placement = tt_placement_flags, }; +#define for_each_set_bo_vram_flag(bit__, bo_flags__) \ + for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \ + for_each_if(((bit__) = __bit_tmp) & (bo_flags__) & XE_BO_FLAG_VRAM_MASK) + bool mem_type_is_vram(u32 mem_type) { return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN; @@ -213,6 +219,27 @@ static bool force_contiguous(u32 bo_flags) bo_flags & XE_BO_FLAG_PINNED; } +static u8 vram_bo_flag_to_tile_id(struct xe_device *xe, u32 vram_bo_flag) +{ + xe_assert(xe, vram_bo_flag & XE_BO_FLAG_VRAM_MASK); + xe_assert(xe, (vram_bo_flag & (vram_bo_flag - 1)) == 0); + + return __ffs(vram_bo_flag >> (__ffs(XE_BO_FLAG_VRAM0) - 1)) - 1; +} + +static u32 bo_vram_flags_to_vram_placement(struct xe_device *xe, u32 bo_flags, u32 vram_flag, + enum ttm_bo_type type) +{ + u8 tile_id = vram_bo_flag_to_tile_id(xe, vram_flag); + + xe_assert(xe, tile_id < xe->info.tile_count); + + if (type == ttm_bo_type_kernel && !(bo_flags & XE_BO_FLAG_FORCE_USER_VRAM)) + return xe->tiles[tile_id].mem.kernel_vram->placement; + else + return xe->tiles[tile_id].mem.vram->placement; +} + static void add_vram(struct xe_device *xe, struct xe_bo *bo, struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c) { @@ -245,12 +272,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo, } static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, - u32 bo_flags, u32 *c) + u32 bo_flags, enum ttm_bo_type type, u32 *c) { - if (bo_flags & XE_BO_FLAG_VRAM0) - add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); - if (bo_flags & XE_BO_FLAG_VRAM1) - add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); + u32 vram_flag; + + for_each_set_bo_vram_flag(vram_flag, bo_flags) { + u32 pl = bo_vram_flags_to_vram_placement(xe, bo_flags, vram_flag, type); + + add_vram(xe, bo, bo->placements, bo_flags, pl, c); + } } static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, @@ -269,11 +299,11 @@ static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, } static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, - u32 bo_flags) + u32 bo_flags, enum ttm_bo_type type) { u32 c = 0; - try_add_vram(xe, bo, bo_flags, &c); + try_add_vram(xe, 
bo, bo_flags, type, &c); try_add_system(xe, bo, bo_flags, &c); try_add_stolen(xe, bo, bo_flags, &c); @@ -289,10 +319,10 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, } int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, - u32 bo_flags) + u32 bo_flags, enum ttm_bo_type type) { xe_bo_assert_held(bo); - return __xe_bo_placement_for_flags(xe, bo, bo_flags); + return __xe_bo_placement_for_flags(xe, bo, bo_flags, type); } static void xe_evict_flags(struct ttm_buffer_object *tbo, @@ -580,6 +610,23 @@ static bool xe_ttm_resource_visible(struct ttm_resource *mem) return vres->used_visible_size == mem->size; } +/** + * xe_bo_is_visible_vram - check if BO is placed entirely in visible VRAM. + * @bo: The BO + * + * This function checks whether a given BO resides entirely in memory visible from the CPU + * + * Returns: true if the BO is entirely visible, false otherwise. + * + */ +bool xe_bo_is_visible_vram(struct xe_bo *bo) +{ + if (drm_WARN_ON(bo->ttm.base.dev, !xe_bo_is_vram(bo))) + return false; + + return xe_ttm_resource_visible(bo->ttm.resource); +} + static int xe_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { @@ -1605,7 +1652,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo, if (!mem_type_is_vram(ttm_bo->resource->mem_type)) return -EIO; - if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) { + if (!xe_bo_is_visible_vram(bo) || len >= SZ_16K) { struct xe_migrate *migrate = mem_type_to_migrate(xe, ttm_bo->resource->mem_type); @@ -1708,7 +1755,7 @@ static void xe_gem_object_free(struct drm_gem_object *obj) * refcount directly if needed. */ __xe_bo_vunmap(gem_to_xe_bo(obj)); - ttm_bo_put(container_of(obj, struct ttm_buffer_object, base)); + ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base)); } static void xe_gem_object_close(struct drm_gem_object *obj, @@ -2075,7 +2122,7 @@ void xe_bo_free(struct xe_bo *bo) * if the function should allocate a new one. * @tile: The tile to select for migration of this bo, and the tile used for * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos. - * @resv: Pointer to a locked shared reservation object to use fo this bo, + * @resv: Pointer to a locked shared reservation object to use for this bo, * or NULL for the xe_bo to use its own. * @bulk: The bulk move to use for LRU bumping, or NULL for external bos. * @size: The storage size to use for the bo. 
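/*
 * Editor's sketch (illustration only): with XE_BO_FLAG_VRAM0 as the lowest
 * VRAM placement flag, vram_bo_flag_to_tile_id() added earlier in this file
 * maps XE_BO_FLAG_VRAM0 to tile 0, XE_BO_FLAG_VRAM1 to tile 1, and
 * for_each_set_bo_vram_flag() visits each requested tile in turn.
 * example_dump_vram_placements() is a made-up name.
 */
static void example_dump_vram_placements(struct xe_device *xe, u32 bo_flags,
					 enum ttm_bo_type type)
{
	u32 vram_flag;

	for_each_set_bo_vram_flag(vram_flag, bo_flags)
		drm_dbg(&xe->drm, "tile %u -> TTM placement %u\n",
			vram_bo_flag_to_tile_id(xe, vram_flag),
			bo_vram_flags_to_vram_placement(xe, bo_flags, vram_flag, type));
}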
@@ -2164,7 +2211,7 @@ struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo, xe_validation_assert_exec(xe, exec, &bo->ttm.base); if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) { - err = __xe_bo_placement_for_flags(xe, bo, bo->flags); + err = __xe_bo_placement_for_flags(xe, bo, bo->flags, type); if (WARN_ON(err)) { xe_ttm_bo_destroy(&bo->ttm); return ERR_PTR(err); @@ -2222,34 +2269,37 @@ struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo, } static int __xe_bo_fixed_placement(struct xe_device *xe, - struct xe_bo *bo, + struct xe_bo *bo, enum ttm_bo_type type, u32 flags, u64 start, u64 end, u64 size) { struct ttm_place *place = bo->placements; + u32 vram_flag, vram_stolen_flags; + + /* + * to allow fixed placement in GGTT of a VF, post-migration fixups would have to + * include selecting a new fixed offset and shifting the page ranges for it + */ + xe_assert(xe, !IS_SRIOV_VF(xe) || !(bo->flags & XE_BO_FLAG_GGTT)); if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM)) return -EINVAL; + vram_flag = flags & XE_BO_FLAG_VRAM_MASK; + vram_stolen_flags = (flags & (XE_BO_FLAG_STOLEN)) | vram_flag; + + /* check if more than one VRAM/STOLEN flag is set */ + if (hweight32(vram_stolen_flags) > 1) + return -EINVAL; + place->flags = TTM_PL_FLAG_CONTIGUOUS; place->fpfn = start >> PAGE_SHIFT; place->lpfn = end >> PAGE_SHIFT; - switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) { - case XE_BO_FLAG_VRAM0: - place->mem_type = XE_PL_VRAM0; - break; - case XE_BO_FLAG_VRAM1: - place->mem_type = XE_PL_VRAM1; - break; - case XE_BO_FLAG_STOLEN: + if (flags & XE_BO_FLAG_STOLEN) place->mem_type = XE_PL_STOLEN; - break; - - default: - /* 0 or multiple of the above set */ - return -EINVAL; - } + else + place->mem_type = bo_vram_flags_to_vram_placement(xe, flags, vram_flag, type); bo->placement = (struct ttm_placement) { .num_placement = 1, @@ -2278,7 +2328,7 @@ __xe_bo_create_locked(struct xe_device *xe, return bo; flags |= XE_BO_FLAG_FIXED_PLACEMENT; - err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size); + err = __xe_bo_fixed_placement(xe, bo, type, flags, start, end, size); if (err) { xe_bo_free(bo); return ERR_PTR(err); @@ -2602,7 +2652,7 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, * @size: The storage size to use for the bo. * @type: The TTM buffer object type. * @flags: XE_BO_FLAG_ flags. - * @intr: Whether to execut any waits for backing store interruptible. + * @intr: Whether to execute any waits for backing store interruptible. * * Create a pinned and mapped bo. The bo will be external and not associated * with a VM. @@ -3577,14 +3627,13 @@ int xe_bo_dumb_create(struct drm_file *file_priv, struct xe_device *xe = to_xe_device(dev); struct xe_bo *bo; uint32_t handle; - int cpp = DIV_ROUND_UP(args->bpp, 8); int err; u32 page_size = max_t(u32, PAGE_SIZE, xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? 
SZ_64K : SZ_4K); - args->pitch = ALIGN(args->width * cpp, 64); - args->size = ALIGN(mul_u32_u32(args->pitch, args->height), - page_size); + err = drm_mode_size_dumb(dev, args, SZ_64, page_size); + if (err) + return err; bo = xe_bo_create_user(xe, NULL, args->size, DRM_XE_GEM_CPU_CACHING_WC, diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index a77af42b5f9e..911d5b90461a 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -49,6 +49,7 @@ #define XE_BO_FLAG_GGTT2 BIT(22) #define XE_BO_FLAG_GGTT3 BIT(23) #define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24) +#define XE_BO_FLAG_FORCE_USER_VRAM BIT(25) /* this one is trigger internally only */ #define XE_BO_FLAG_INTERNAL_TEST BIT(30) @@ -122,7 +123,7 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src); int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, - u32 bo_flags); + u32 bo_flags, enum ttm_bo_type type); static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo) { @@ -273,6 +274,7 @@ int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size); bool mem_type_is_vram(u32 mem_type); bool xe_bo_is_vram(struct xe_bo *bo); +bool xe_bo_is_visible_vram(struct xe_bo *bo); bool xe_bo_is_stolen(struct xe_bo *bo); bool xe_bo_is_stolen_devmem(struct xe_bo *bo); bool xe_bo_is_vm_bound(struct xe_bo *bo); diff --git a/drivers/gpu/drm/xe/xe_bo_doc.h b/drivers/gpu/drm/xe/xe_bo_doc.h index 25a884c64bf1..401e7dd26ef3 100644 --- a/drivers/gpu/drm/xe/xe_bo_doc.h +++ b/drivers/gpu/drm/xe/xe_bo_doc.h @@ -12,7 +12,7 @@ * BO management * ============= * - * TTM manages (placement, eviction, etc...) all BOs in XE. + * TTM manages (placement, eviction, etc...) all BOs in Xe. * * BO creation * =========== @@ -29,7 +29,7 @@ * a kernel BO (e.g. engine state, memory for page tables, etc...). These BOs * are typically mapped in the GGTT (any kernel BOs aside memory for page tables * are in the GGTT), are pinned (can't move or be evicted at runtime), have a - * vmap (XE can access the memory via xe_map layer) and have contiguous physical + * vmap (Xe can access the memory via xe_map layer) and have contiguous physical * memory. * * More details of why kernel BOs are pinned and contiguous below. @@ -40,7 +40,7 @@ * A user BO is created via the DRM_IOCTL_XE_GEM_CREATE IOCTL. Once it is * created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user * access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All - * user BOs are evictable and user BOs are never pinned by XE. The allocation of + * user BOs are evictable and user BOs are never pinned by Xe. The allocation of * the backing store can be deferred from creation time until first use which is * either mmap, bind, or pagefault. * @@ -84,7 +84,7 @@ * ==================== * * All eviction (or in other words, moving a BO from one memory location to - * another) is routed through TTM with a callback into XE. + * another) is routed through TTM with a callback into Xe. 
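/*
 * Editor's note on the xe_bo_dumb_create() change above: per the deleted
 * lines, the removed open-coded math amounted to
 *
 *	cpp = DIV_ROUND_UP(args->bpp, 8);
 *	args->pitch = ALIGN(args->width * cpp, 64);
 *	args->size = ALIGN(mul_u32_u32(args->pitch, args->height), page_size);
 *
 * drm_mode_size_dumb(dev, args, SZ_64, page_size) is expected to yield an
 * equivalent pitch/size; its internals (including overflow handling) are not
 * shown in this diff.
 */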
* * Runtime eviction * ---------------- diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index bc5b4c5fab81..7661fca7f278 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -73,6 +73,11 @@ int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe) &xe->pinned.late.kernel_bo_present, xe_bo_notifier_prepare_pinned); + if (!ret) + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external, + &xe->pinned.late.external, + xe_bo_notifier_prepare_pinned); + return ret; } @@ -93,6 +98,10 @@ void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe) (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present, &xe->pinned.late.kernel_bo_present, xe_bo_notifier_unprepare_pinned); + + (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external, + &xe->pinned.late.external, + xe_bo_notifier_unprepare_pinned); } /** diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c index 139663423185..9f6251b1008b 100644 --- a/drivers/gpu/drm/xe/xe_configfs.c +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -15,9 +15,11 @@ #include "instructions/xe_mi_commands.h" #include "xe_configfs.h" +#include "xe_gt_types.h" #include "xe_hw_engine_types.h" #include "xe_module.h" #include "xe_pci_types.h" +#include "xe_sriov_types.h" /** * DOC: Xe Configfs @@ -25,7 +27,7 @@ * Overview * ======== * - * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a + * Configfs is a filesystem-based manager of kernel objects. Xe KMD registers a * configfs subsystem called ``xe`` that creates a directory in the mounted * configfs directory. The user can create devices under this directory and * configure them as necessary. See Documentation/filesystems/configfs.rst for @@ -56,6 +58,7 @@ * : * └── 0000:03:00.0 * ├── survivability_mode + * ├── gt_types_allowed * ├── engines_allowed * └── enable_psmi * @@ -79,6 +82,44 @@ * * This attribute can only be set before binding to the device. * + * Allowed GT types: + * ----------------- + * + * Allow only specific types of GTs to be detected and initialized by the + * driver. Any combination of GT types can be enabled/disabled, although + * some settings will cause the device to fail to probe. + * + * Writes support both comma- and newline-separated input format. Reads + * will always return one GT type per line. "primary" and "media" are the + * GT type names supported by this interface. + * + * This attribute can only be set before binding to the device. + * + * Examples: + * + * Allow both primary and media GTs to be initialized and used. This matches + * the driver's default behavior:: + * + * # echo 'primary,media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed + * + * Allow only the primary GT of each tile to be initialized and used, + * effectively disabling the media GT if it exists on the platform:: + * + * # echo 'primary' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed + * + * Allow only the media GT of each tile to be initialized and used, + * effectively disabling the primary GT. **This configuration will cause + * device probe failure on all current platforms, but may be allowed on + * igpu platforms in the future**:: + * + * # echo 'media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed + * + * Disable all GTs. Only other GPU IP (such as display) is potentially usable. 
+ * **This configuration will cause device probe failure on all current + * platforms, but may be allowed on igpu platforms in the future**:: + * + * # echo '' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed + * * Allowed engines: * ---------------- * @@ -169,6 +210,32 @@ * Currently this is implemented only for post and mid context restore and * these attributes can only be set before binding to the device. * + * Max SR-IOV Virtual Functions + * ---------------------------- + * + * This config allows to limit number of the Virtual Functions (VFs) that can + * be managed by the Physical Function (PF) driver, where value 0 disables the + * PF mode (no VFs). + * + * The default max_vfs config value is taken from the max_vfs modparam. + * + * How to enable PF with support with unlimited (up to HW limit) number of VFs:: + * + * # echo unlimited > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs + * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind + * + * How to enable PF with support up to 3 VFs:: + * + * # echo 3 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs + * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind + * + * How to disable PF mode and always run as native:: + * + * # echo 0 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs + * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind + * + * This setting only takes effect when probing the device. + * * Remove devices * ============== * @@ -185,30 +252,44 @@ struct wa_bb { struct xe_config_group_device { struct config_group group; + struct config_group sriov; struct xe_config_device { + u64 gt_types_allowed; u64 engines_allowed; struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX]; struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX]; bool survivability_mode; bool enable_psmi; + struct { + unsigned int max_vfs; + } sriov; } config; /* protects attributes */ struct mutex lock; /* matching descriptor */ const struct xe_device_desc *desc; + /* tentative SR-IOV mode */ + enum xe_sriov_mode mode; }; static const struct xe_config_device device_defaults = { + .gt_types_allowed = U64_MAX, .engines_allowed = U64_MAX, .survivability_mode = false, .enable_psmi = false, + .sriov = { + .max_vfs = UINT_MAX, + }, }; static void set_device_defaults(struct xe_config_device *config) { *config = device_defaults; +#ifdef CONFIG_PCI_IOV + config->sriov.max_vfs = xe_modparam.max_vfs; +#endif } struct engine_info { @@ -230,6 +311,14 @@ static const struct engine_info engine_info[] = { { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER }, }; +static const struct { + const char *name; + enum xe_gt_type type; +} gt_types[] = { + { .name = "primary", .type = XE_GT_TYPE_MAIN }, + { .name = "media", .type = XE_GT_TYPE_MEDIA }, +}; + static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item) { return container_of(to_config_group(item), struct xe_config_group_device, group); @@ -292,6 +381,57 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa return len; } +static ssize_t gt_types_allowed_show(struct config_item *item, char *page) +{ + struct xe_config_device *dev = to_xe_config_device(item); + char *p = page; + + for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++) + if (dev->gt_types_allowed & BIT_ULL(gt_types[i].type)) + p += sprintf(p, "%s\n", gt_types[i].name); + + return p - page; +} + +static ssize_t gt_types_allowed_store(struct config_item *item, const char *page, + size_t len) +{ + struct xe_config_group_device *dev = 
to_xe_config_group_device(item); + char *buf __free(kfree) = kstrdup(page, GFP_KERNEL); + char *p = buf; + u64 typemask = 0; + + if (!buf) + return -ENOMEM; + + while (p) { + char *typename = strsep(&p, ",\n"); + bool matched = false; + + if (typename[0] == '\0') + continue; + + for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++) { + if (strcmp(typename, gt_types[i].name) == 0) { + typemask |= BIT(gt_types[i].type); + matched = true; + break; + } + } + + if (!matched) + return -EINVAL; + } + + guard(mutex)(&dev->lock); + if (is_bound(dev)) + return -EBUSY; + + dev->config.gt_types_allowed = typemask; + + return len; +} + static ssize_t engines_allowed_show(struct config_item *item, char *page) { struct xe_config_device *dev = to_xe_config_device(item); @@ -672,6 +812,7 @@ CONFIGFS_ATTR(, ctx_restore_mid_bb); CONFIGFS_ATTR(, ctx_restore_post_bb); CONFIGFS_ATTR(, enable_psmi); CONFIGFS_ATTR(, engines_allowed); +CONFIGFS_ATTR(, gt_types_allowed); CONFIGFS_ATTR(, survivability_mode); static struct configfs_attribute *xe_config_device_attrs[] = { @@ -679,6 +820,7 @@ static struct configfs_attribute *xe_config_device_attrs[] = { &attr_ctx_restore_post_bb, &attr_enable_psmi, &attr_engines_allowed, + &attr_gt_types_allowed, &attr_survivability_mode, NULL, }; @@ -721,6 +863,68 @@ static const struct config_item_type xe_config_device_type = { .ct_owner = THIS_MODULE, }; +static ssize_t sriov_max_vfs_show(struct config_item *item, char *page) +{ + struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent); + + guard(mutex)(&dev->lock); + + if (dev->config.sriov.max_vfs == UINT_MAX) + return sprintf(page, "%s\n", "unlimited"); + else + return sprintf(page, "%u\n", dev->config.sriov.max_vfs); +} + +static ssize_t sriov_max_vfs_store(struct config_item *item, const char *page, size_t len) +{ + struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent); + unsigned int max_vfs; + int ret; + + guard(mutex)(&dev->lock); + + if (is_bound(dev)) + return -EBUSY; + + ret = kstrtouint(page, 0, &max_vfs); + if (ret) { + if (!sysfs_streq(page, "unlimited")) + return ret; + max_vfs = UINT_MAX; + } + + dev->config.sriov.max_vfs = max_vfs; + return len; +} + +CONFIGFS_ATTR(sriov_, max_vfs); + +static struct configfs_attribute *xe_config_sriov_attrs[] = { + &sriov_attr_max_vfs, + NULL, +}; + +static bool xe_config_sriov_is_visible(struct config_item *item, + struct configfs_attribute *attr, int n) +{ + struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent); + + if (attr == &sriov_attr_max_vfs && dev->mode != XE_SRIOV_MODE_PF) + return false; + + return true; +} + +static struct configfs_group_operations xe_config_sriov_group_ops = { + .is_visible = xe_config_sriov_is_visible, +}; + +static const struct config_item_type xe_config_sriov_type = { + .ct_owner = THIS_MODULE, + .ct_group_ops = &xe_config_sriov_group_ops, + .ct_attrs = xe_config_sriov_attrs, +}; + static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev) { struct device_driver *driver = driver_find("xe", &pci_bus_type); @@ -746,6 +950,7 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro unsigned int domain, bus, slot, function; struct xe_config_group_device *dev; const struct xe_device_desc *match; + enum xe_sriov_mode mode; struct pci_dev *pdev; char canonical[16]; int vfnumber = 0; @@ -762,6 +967,9 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro return ERR_PTR(-EINVAL); pdev = 
pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function)); + mode = pdev ? dev_is_pf(&pdev->dev) ? + XE_SRIOV_MODE_PF : XE_SRIOV_MODE_NONE : XE_SRIOV_MODE_VF; + if (!pdev && function) pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0)); if (!pdev && slot) @@ -796,9 +1004,15 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro return ERR_PTR(-ENOMEM); dev->desc = match; + dev->mode = match->has_sriov ? mode : XE_SRIOV_MODE_NONE; + set_device_defaults(&dev->config); config_group_init_type_name(&dev->group, name, &xe_config_device_type); + if (dev->mode != XE_SRIOV_MODE_NONE) { + config_group_init_type_name(&dev->sriov, "sriov", &xe_config_sriov_type); + configfs_add_default_group(&dev->sriov, &dev->group); + } mutex_init(&dev->lock); @@ -846,6 +1060,7 @@ static void dump_custom_dev_config(struct pci_dev *pdev, dev->config.attr_); \ } while (0) + PRI_CUSTOM_ATTR("%llx", gt_types_allowed); PRI_CUSTOM_ATTR("%llx", engines_allowed); PRI_CUSTOM_ATTR("%d", enable_psmi); PRI_CUSTOM_ATTR("%d", survivability_mode); @@ -896,6 +1111,44 @@ bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) return mode; } +static u64 get_gt_types_allowed(struct pci_dev *pdev) +{ + struct xe_config_group_device *dev = find_xe_config_group_device(pdev); + u64 mask; + + if (!dev) + return device_defaults.gt_types_allowed; + + mask = dev->config.gt_types_allowed; + config_group_put(&dev->group); + + return mask; +} + +/** + * xe_configfs_primary_gt_allowed - determine whether primary GTs are supported + * @pdev: pci device + * + * Return: True if primary GTs are enabled, false if they have been disabled via + * configfs. + */ +bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev) +{ + return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MAIN); +} + +/** + * xe_configfs_media_gt_allowed - determine whether media GTs are supported + * @pdev: pci device + * + * Return: True if the media GTs are enabled, false if they have been disabled + * via configfs. + */ +bool xe_configfs_media_gt_allowed(struct pci_dev *pdev) +{ + return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MEDIA); +} + /** * xe_configfs_get_engines_allowed - get engine allowed mask from configfs * @pdev: pci device @@ -988,6 +1241,34 @@ u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, return len; } +#ifdef CONFIG_PCI_IOV +/** + * xe_configfs_get_max_vfs() - Get number of VFs that could be managed + * @pdev: the &pci_dev device + * + * Find the configfs group that belongs to the PCI device and return maximum + * number of Virtual Functions (VFs) that could be managed by this device. + * If configfs group is not present, use value of max_vfs module parameter. + * + * Return: maximum number of VFs that could be managed. 
+ */ +unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev) +{ + struct xe_config_group_device *dev = find_xe_config_group_device(pdev); + unsigned int max_vfs; + + if (!dev) + return xe_modparam.max_vfs; + + scoped_guard(mutex, &dev->lock) + max_vfs = dev->config.sriov.max_vfs; + + config_group_put(&dev->group); + + return max_vfs; +} +#endif + int __init xe_configfs_init(void) { int ret; diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h index c61e0e47ed94..fed57be0b90e 100644 --- a/drivers/gpu/drm/xe/xe_configfs.h +++ b/drivers/gpu/drm/xe/xe_configfs.h @@ -17,23 +17,31 @@ int xe_configfs_init(void); void xe_configfs_exit(void); void xe_configfs_check_device(struct pci_dev *pdev); bool xe_configfs_get_survivability_mode(struct pci_dev *pdev); +bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev); +bool xe_configfs_media_gt_allowed(struct pci_dev *pdev); u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev); bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev); u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class, const u32 **cs); u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class, const u32 **cs); +#ifdef CONFIG_PCI_IOV +unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev); +#endif #else static inline int xe_configfs_init(void) { return 0; } static inline void xe_configfs_exit(void) { } static inline void xe_configfs_check_device(struct pci_dev *pdev) { } static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; } +static inline bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev) { return true; } +static inline bool xe_configfs_media_gt_allowed(struct pci_dev *pdev) { return true; } static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; } static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; } static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class, const u32 **cs) { return 0; } static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class, const u32 **cs) { return 0; } +static inline unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev) { return UINT_MAX; } #endif #endif diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index cd977dbd1ef6..e91da9589c5f 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -23,12 +23,12 @@ #include "xe_psmi.h" #include "xe_pxp_debugfs.h" #include "xe_sriov.h" -#include "xe_sriov_pf.h" +#include "xe_sriov_pf_debugfs.h" #include "xe_sriov_vf.h" #include "xe_step.h" #include "xe_tile_debugfs.h" -#include "xe_wa.h" #include "xe_vsec.h" +#include "xe_wa.h" #ifdef CONFIG_DRM_XE_DEBUG #include "xe_bo_evict.h" @@ -142,6 +142,7 @@ static int dgfx_pkg_residencies_show(struct seq_file *m, void *data) } residencies[] = { {BMG_G2_RESIDENCY_OFFSET, "Package G2"}, {BMG_G6_RESIDENCY_OFFSET, "Package G6"}, + {BMG_G7_RESIDENCY_OFFSET, "Package G7"}, {BMG_G8_RESIDENCY_OFFSET, "Package G8"}, {BMG_G10_RESIDENCY_OFFSET, "Package G10"}, {BMG_MODS_RESIDENCY_OFFSET, "Package ModS"} @@ -349,17 +350,14 @@ static ssize_t disable_late_binding_set(struct file *f, const char __user *ubuf, { struct xe_device *xe = file_inode(f)->i_private; struct xe_late_bind *late_bind = &xe->late_bind; - u32 uval; - ssize_t ret; + bool val; + int ret; - ret = kstrtouint_from_user(ubuf, size, sizeof(uval), &uval); + ret = 
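/*
 * Editor's sketch (assumption; the probe-side consumer is not shown in this
 * diff): GT initialization is expected to consult the configfs getters above
 * before bringing up each GT type.  example_gt_type_allowed() is a made-up
 * name; only the xe_configfs_*_gt_allowed() helpers come from the patch.
 */
static bool example_gt_type_allowed(struct pci_dev *pdev, enum xe_gt_type type)
{
	if (type == XE_GT_TYPE_MEDIA)
		return xe_configfs_media_gt_allowed(pdev);

	return xe_configfs_primary_gt_allowed(pdev);
}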
kstrtobool_from_user(ubuf, size, &val); if (ret) return ret; - if (uval > 1) - return -EINVAL; - - late_bind->disable = !!uval; + late_bind->disable = val; return size; } diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 203e3038cc81..d444eda65ca6 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -106,9 +106,9 @@ static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count, drm_puts(&p, "module: " KBUILD_MODNAME "\n"); ts = ktime_to_timespec64(ss->snapshot_time); - drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); + drm_printf(&p, "Snapshot time: %ptSp\n", &ts); ts = ktime_to_timespec64(ss->boot_time); - drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); + drm_printf(&p, "Uptime: %ptSp\n", &ts); drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid); xe_device_snapshot_print(xe, &p); diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 34d33965eac2..c7d373c70f0f 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -8,6 +8,7 @@ #include <linux/aperture.h> #include <linux/delay.h> #include <linux/fault-inject.h> +#include <linux/iopoll.h> #include <linux/units.h> #include <drm/drm_atomic_helper.h> @@ -51,6 +52,7 @@ #include "xe_nvm.h" #include "xe_oa.h" #include "xe_observation.h" +#include "xe_pagefault.h" #include "xe_pat.h" #include "xe_pcode.h" #include "xe_pm.h" @@ -436,7 +438,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, xe->drm.anon_inode->i_mapping, - xe->drm.vma_offset_manager, false, false); + xe->drm.vma_offset_manager, 0); if (WARN_ON(err)) goto err; @@ -630,16 +632,22 @@ mask_err: return err; } -static bool verify_lmem_ready(struct xe_device *xe) +static int lmem_initializing(struct xe_device *xe) { - u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT; + if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT) + return 0; + + if (signal_pending(current)) + return -EINTR; - return !!val; + return 1; } static int wait_for_lmem_ready(struct xe_device *xe) { - unsigned long timeout, start; + const unsigned long TIMEOUT_SEC = 60; + unsigned long prev_jiffies; + int initializing; if (!IS_DGFX(xe)) return 0; @@ -647,39 +655,35 @@ static int wait_for_lmem_ready(struct xe_device *xe) if (IS_SRIOV_VF(xe)) return 0; - if (verify_lmem_ready(xe)) + if (!lmem_initializing(xe)) return 0; drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); + prev_jiffies = jiffies; - start = jiffies; - timeout = start + secs_to_jiffies(60); /* 60 sec! */ - - do { - if (signal_pending(current)) - return -EINTR; - - /* - * The boot firmware initializes local memory and - * assesses its health. If memory training fails, - * the punit will have been instructed to keep the GT powered - * down.we won't be able to communicate with it - * - * If the status check is done before punit updates the register, - * it can lead to the system being unusable. - * use a timeout and defer the probe to prevent this. - */ - if (time_after(jiffies, timeout)) { - drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); - return -EPROBE_DEFER; - } - - msleep(20); - - } while (!verify_lmem_ready(xe)); + /* + * The boot firmware initializes local memory and + * assesses its health. 
If memory training fails, + * the punit will have been instructed to keep the GT powered + * down.we won't be able to communicate with it + * + * If the status check is done before punit updates the register, + * it can lead to the system being unusable. + * use a timeout and defer the probe to prevent this. + */ + poll_timeout_us(initializing = lmem_initializing(xe), + initializing <= 0, + 20 * USEC_PER_MSEC, TIMEOUT_SEC * USEC_PER_SEC, true); + if (initializing < 0) + return initializing; + + if (initializing) { + drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); + return -EPROBE_DEFER; + } drm_dbg(&xe->drm, "lmem ready after %ums", - jiffies_to_msecs(jiffies - start)); + jiffies_to_msecs(jiffies - prev_jiffies)); return 0; } @@ -779,6 +783,8 @@ static int probe_has_flat_ccs(struct xe_device *xe) return 0; gt = xe_root_mmio_gt(xe); + if (!gt) + return 0; fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (!fw_ref) @@ -891,6 +897,10 @@ int xe_device_probe(struct xe_device *xe) return err; } + err = xe_pagefault_init(xe); + if (err) + return err; + if (xe->tiles->media_gt && XE_GT_WA(xe->tiles->media_gt, 15015404425_disable)) XE_DEVICE_WA_DISABLE(xe, 15015404425); @@ -988,16 +998,16 @@ void xe_device_shutdown(struct xe_device *xe) drm_dbg(&xe->drm, "Shutting down device\n"); - if (xe_driver_flr_disabled(xe)) { - xe_display_pm_shutdown(xe); + xe_display_pm_shutdown(xe); + + xe_irq_suspend(xe); - xe_irq_suspend(xe); + for_each_gt(gt, xe, id) + xe_gt_shutdown(gt); - for_each_gt(gt, xe, id) - xe_gt_shutdown(gt); + xe_display_pm_shutdown_late(xe); - xe_display_pm_shutdown_late(xe); - } else { + if (!xe_driver_flr_disabled(xe)) { /* BOOM! */ __xe_driver_flr(xe); } @@ -1059,6 +1069,8 @@ void xe_device_l2_flush(struct xe_device *xe) unsigned int fw_ref; gt = xe_root_mmio_gt(xe); + if (!gt) + return; if (!XE_GT_WA(gt, 16023588340)) return; @@ -1104,6 +1116,9 @@ void xe_device_td_flush(struct xe_device *xe) return; root_gt = xe_root_mmio_gt(xe); + if (!root_gt) + return; + if (XE_GT_WA(root_gt, 16023588340)) { /* A transient flush is not sufficient: flush the L2 */ xe_device_l2_flush(xe); @@ -1207,7 +1222,7 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg) * * /sys/bus/pci/devices/<device>/survivability_mode * - * - Admin/userpsace consumer can use firmware flashing tools like fwupd to flash + * - Admin/userspace consumer can use firmware flashing tools like fwupd to flash * firmware and restore device to normal operation. 
*/ diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c index c5151c86a98a..ec9c06b06fb5 100644 --- a/drivers/gpu/drm/xe/xe_device_sysfs.c +++ b/drivers/gpu/drm/xe/xe_device_sysfs.c @@ -38,13 +38,8 @@ vram_d3cold_threshold_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); struct xe_device *xe = pdev_to_xe_device(pdev); - int ret; - - xe_pm_runtime_get(xe); - ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold); - xe_pm_runtime_put(xe); - return ret; + return sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold); } static ssize_t @@ -173,11 +168,8 @@ static umode_t late_bind_attr_is_visible(struct kobject *kobj, u32 cap = 0; int ret; - xe_pm_runtime_get(xe); - ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), &cap, NULL); - xe_pm_runtime_put(xe); if (ret) return 0; diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 74d7af830b85..0b2fa7c56d38 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -18,6 +18,7 @@ #include "xe_lmtt_types.h" #include "xe_memirq_types.h" #include "xe_oa_types.h" +#include "xe_pagefault_types.h" #include "xe_platform_types.h" #include "xe_pmu_types.h" #include "xe_pt_types.h" @@ -27,6 +28,7 @@ #include "xe_sriov_vf_ccs_types.h" #include "xe_step_types.h" #include "xe_survivability_mode_types.h" +#include "xe_tile_sriov_vf_types.h" #include "xe_validation.h" #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) @@ -158,7 +160,15 @@ struct xe_tile { /** @mem: memory management info for tile */ struct { /** - * @mem.vram: VRAM info for tile. + * @mem.kernel_vram: kernel-dedicated VRAM info for tile. + * + * Although VRAM is associated with a specific tile, it can + * still be accessed by all tiles' GTs. + */ + struct xe_vram_region *kernel_vram; + + /** + * @mem.vram: general purpose VRAM info for tile. * * Although VRAM is associated with a specific tile, it can * still be accessed by all tiles' GTs. @@ -185,6 +195,8 @@ struct xe_tile { struct { /** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */ struct xe_ggtt_node *ggtt_balloon[2]; + /** @sriov.vf.self_config: VF configuration data */ + struct xe_tile_sriov_vf_selfconfig self_config; } vf; } sriov; @@ -211,12 +223,17 @@ struct xe_tile { }; /** - * struct xe_device - Top level struct of XE device + * struct xe_device - Top level struct of Xe device */ struct xe_device { /** @drm: drm device */ struct drm_device drm; +#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) + /** @display: display device data, must be placed after drm device member */ + struct intel_display *display; +#endif + /** @devcoredump: device coredump */ struct xe_devcoredump devcoredump; @@ -234,9 +251,9 @@ struct xe_device { u32 media_verx100; /** @info.mem_region_mask: mask of valid memory regions */ u32 mem_region_mask; - /** @info.platform: XE platform enum */ + /** @info.platform: Xe platform enum */ enum xe_platform platform; - /** @info.subplatform: XE subplatform enum */ + /** @info.subplatform: Xe subplatform enum */ enum xe_subplatform subplatform; /** @info.devid: device ID */ u16 devid; @@ -289,6 +306,8 @@ struct xe_device { * pcode mailbox commands. 
*/ u8 has_mbx_power_limits:1; + /** @info.has_mem_copy_instr: Device supports MEM_COPY instruction */ + u8 has_mem_copy_instr:1; /** @info.has_pxp: Device has PXP support */ u8 has_pxp:1; /** @info.has_range_tlb_inval: Has range based TLB invalidations */ @@ -318,6 +337,8 @@ struct xe_device { u8 skip_mtcfg:1; /** @info.skip_pcode: skip access to PCODE uC */ u8 skip_pcode:1; + /** @info.needs_shared_vf_gt_wq: needs shared GT WQ on VF */ + u8 needs_shared_vf_gt_wq:1; } info; /** @wa_active: keep track of active workarounds */ @@ -398,6 +419,16 @@ struct xe_device { u32 next_asid; /** @usm.lock: protects UM state */ struct rw_semaphore lock; + /** @usm.pf_wq: page fault work queue, unbound, high priority */ + struct workqueue_struct *pf_wq; + /* + * We pick 4 here because, in the current implementation, it + * yields the best bandwidth utilization of the kernel paging + * engine. + */ +#define XE_PAGEFAULT_QUEUE_COUNT 4 + /** @usm.pf_queue: Page fault queues */ + struct xe_pagefault_queue pf_queue[XE_PAGEFAULT_QUEUE_COUNT]; } usm; /** @pinned: pinned BO state */ @@ -617,8 +648,6 @@ struct xe_device { * drm_i915_private during build. After cleanup these should go away, * migrating to the right sub-structs */ - struct intel_display *display; - const struct dram_info *dram_info; /* @@ -627,26 +656,14 @@ struct xe_device { */ u32 edram_size_mb; - /* To shut up runtime pm macros.. */ - struct xe_runtime_pm {} runtime_pm; - - /* only to allow build, not used functionally */ - u32 irq_mask; - struct intel_uncore { spinlock_t lock; } uncore; - - /* only to allow build, not used functionally */ - struct { - unsigned int hpll_freq; - unsigned int czclk_freq; - }; #endif }; /** - * struct xe_file - file handle for XE driver + * struct xe_file - file handle for Xe driver */ struct xe_file { /** @xe: xe DEVICE **/ diff --git a/drivers/gpu/drm/xe/xe_device_wa_oob.rules b/drivers/gpu/drm/xe/xe_device_wa_oob.rules index 3a0c4ccc4224..55ba01bc8f38 100644 --- a/drivers/gpu/drm/xe/xe_device_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_device_wa_oob.rules @@ -1,2 +1,5 @@ +22010954014 PLATFORM(DG2) 15015404425 PLATFORM(LUNARLAKE) PLATFORM(PANTHERLAKE) +22019338487_display PLATFORM(LUNARLAKE) +14022085890 SUBPLATFORM(BATTLEMAGE, G21) diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index a7d67725c3ee..54e42960daad 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -48,32 +48,43 @@ static void xe_dma_buf_detach(struct dma_buf *dmabuf, static int xe_dma_buf_pin(struct dma_buf_attachment *attach) { - struct drm_gem_object *obj = attach->dmabuf->priv; + struct dma_buf *dmabuf = attach->dmabuf; + struct drm_gem_object *obj = dmabuf->priv; struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_device *xe = xe_bo_device(bo); struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED; + bool allow_vram = true; int ret; - /* - * For now only support pinning in TT memory, for two reasons: - * 1) Avoid pinning in a placement not accessible to some importers. - * 2) Pinning in VRAM requires PIN accounting which is a to-do. 
- */ - if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) { + if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { + allow_vram = false; + } else { + list_for_each_entry(attach, &dmabuf->attachments, node) { + if (!attach->peer2peer) { + allow_vram = false; + break; + } + } + } + + if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT) && + !(xe_bo_is_vram(bo) && allow_vram)) { drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n"); return -EINVAL; } - ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec); - if (ret) { - if (ret != -EINTR && ret != -ERESTARTSYS) - drm_dbg(&xe->drm, - "Failed migrating dma-buf to TT memory: %pe\n", - ERR_PTR(ret)); - return ret; + if (!allow_vram) { + ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec); + if (ret) { + if (ret != -EINTR && ret != -ERESTARTSYS) + drm_dbg(&xe->drm, + "Failed migrating dma-buf to TT memory: %pe\n", + ERR_PTR(ret)); + return ret; + } } - ret = xe_bo_pin_external(bo, true, exec); + ret = xe_bo_pin_external(bo, !allow_vram, exec); xe_assert(xe, !ret); return 0; diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c index f5cfdf29fde3..97dfb7945b7a 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.c +++ b/drivers/gpu/drm/xe/xe_eu_stall.c @@ -49,6 +49,7 @@ struct xe_eu_stall_data_stream { wait_queue_head_t poll_wq; size_t data_record_size; size_t per_xecore_buf_size; + unsigned int fw_ref; struct xe_gt *gt; struct xe_bo *bo; @@ -124,6 +125,27 @@ struct xe_eu_stall_data_xe2 { __u64 unused[6]; } __packed; +/* + * EU stall data format for Xe3p arch GPUs. + */ +struct xe_eu_stall_data_xe3p { + __u64 ip_addr:61; /* Bits 0 to 60 */ + __u64 tdr_count:8; /* Bits 61 to 68 */ + __u64 other_count:8; /* Bits 69 to 76 */ + __u64 control_count:8; /* Bits 77 to 84 */ + __u64 pipestall_count:8; /* Bits 85 to 92 */ + __u64 send_count:8; /* Bits 93 to 100 */ + __u64 dist_acc_count:8; /* Bits 101 to 108 */ + __u64 sbid_count:8; /* Bits 109 to 116 */ + __u64 sync_count:8; /* Bits 117 to 124 */ + __u64 inst_fetch_count:8; /* Bits 125 to 132 */ + __u64 active_count:8; /* Bits 133 to 140 */ + __u64 ex_id:3; /* Bits 141 to 143 */ + __u64 end_flag:1; /* Bit 144 */ + __u64 unused_bits:47; + __u64 unused[5]; +} __packed; + const u64 eu_stall_sampling_rates[] = {251, 251 * 2, 251 * 3, 251 * 4, 251 * 5, 251 * 6, 251 * 7}; /** @@ -167,10 +189,13 @@ size_t xe_eu_stall_data_record_size(struct xe_device *xe) { size_t record_size = 0; - if (xe->info.platform == XE_PVC) - record_size = sizeof(struct xe_eu_stall_data_pvc); + if (GRAPHICS_VER(xe) >= 35) + record_size = sizeof(struct xe_eu_stall_data_xe3p); else if (GRAPHICS_VER(xe) >= 20) record_size = sizeof(struct xe_eu_stall_data_xe2); + else if (xe->info.platform == XE_PVC) + record_size = sizeof(struct xe_eu_stall_data_pvc); + xe_assert(xe, is_power_of_2(record_size)); @@ -636,13 +661,12 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream) struct per_xecore_buf *xecore_buf; struct xe_gt *gt = stream->gt; u16 group, instance; - unsigned int fw_ref; int xecore; /* Take runtime pm ref and forcewake to disable RC6 */ xe_pm_runtime_get(gt_to_xe(gt)); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_RENDER)) { + stream->fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER); + if (!xe_force_wake_ref_has_domain(stream->fw_ref, XE_FW_RENDER)) { xe_gt_err(gt, "Failed to get RENDER forcewake\n"); xe_pm_runtime_put(gt_to_xe(gt)); return -ETIMEDOUT; @@ -808,7 +832,7 @@ static int xe_eu_stall_disable_locked(struct 
xe_eu_stall_data_stream *stream) xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2, _MASKED_BIT_DISABLE(DISABLE_DOP_GATING)); - xe_force_wake_put(gt_to_fw(gt), XE_FW_RENDER); + xe_force_wake_put(gt_to_fw(gt), stream->fw_ref); xe_pm_runtime_put(gt_to_xe(gt)); return 0; diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 7715e74bb945..4d81210e41f5 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -16,10 +16,12 @@ #include "xe_exec_queue.h" #include "xe_hw_engine_group.h" #include "xe_macros.h" +#include "xe_pm.h" #include "xe_ring_ops_types.h" #include "xe_sched_job.h" #include "xe_sync.h" #include "xe_svm.h" +#include "xe_trace.h" #include "xe_vm.h" /** @@ -32,7 +34,7 @@ * - Binding at exec time * - Flow controlling the ring at exec time * - * In XE we avoid all of this complication by not allowing a BO list to be + * In Xe we avoid all of this complication by not allowing a BO list to be * passed into an exec, using the dma-buf implicit sync uAPI, have binds as * separate operations, and using the DRM scheduler to flow control the ring. * Let's deep dive on each of these. @@ -123,7 +125,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct xe_validation_ctx ctx; struct xe_sched_job *job; struct xe_vm *vm; - bool write_locked, skip_retry = false; + bool write_locked; int err = 0; struct xe_hw_engine_group *group; enum xe_hw_engine_group_execution_mode mode, previous_mode; @@ -153,6 +155,12 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto err_exec_queue; } + if (atomic_read(&q->job_cnt) >= XE_MAX_JOB_COUNT_PER_EXEC_QUEUE) { + trace_xe_exec_queue_reach_max_job_count(q, XE_MAX_JOB_COUNT_PER_EXEC_QUEUE); + err = -EAGAIN; + goto err_exec_queue; + } + if (args->num_syncs) { syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL); if (!syncs) { @@ -165,7 +173,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs], - &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC | + &syncs_user[num_syncs], NULL, 0, + SYNC_PARSE_FLAG_EXEC | (xe_vm_in_lr_mode(vm) ? SYNC_PARSE_FLAG_LR_MODE : 0)); if (err) @@ -247,7 +256,7 @@ retry: * on task freezing during suspend / hibernate, the call will * return -ERESTARTSYS and the IOCTL will be rerun. 
*/ - err = wait_for_completion_interruptible(&xe->pm_block); + err = xe_pm_block_on_suspend(xe); if (err) goto err_unlock_list; @@ -265,12 +274,6 @@ retry: goto err_exec; } - if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) { - err = -EWOULDBLOCK; /* Aliased to -EAGAIN */ - skip_retry = true; - goto err_exec; - } - if (xe_exec_queue_uses_pxp(q)) { err = xe_vm_validate_protected(q->vm); if (err) @@ -299,10 +302,6 @@ retry: goto err_put_job; if (!xe_vm_in_lr_mode(vm)) { - err = xe_sched_job_last_fence_add_dep(job, vm); - if (err) - goto err_put_job; - err = xe_svm_notifier_lock_interruptible(vm); if (err) goto err_put_job; @@ -327,8 +326,6 @@ retry: xe_sched_job_init_user_fence(job, &syncs[i]); } - if (xe_exec_queue_is_lr(q)) - q->ring_ops->emit_job(job); if (!xe_vm_in_lr_mode(vm)) xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished); xe_sched_job_push(job); @@ -354,7 +351,7 @@ err_exec: xe_validation_ctx_fini(&ctx); err_unlock_list: up_read(&vm->lock); - if (err == -EAGAIN && !skip_retry) + if (err == -EAGAIN) goto retry; err_hw_exec_mode: if (mode == EXEC_MODE_DMA_FENCE) diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index 37b2b93b73d6..8724f8de67e2 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -10,11 +10,13 @@ #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_syncobj.h> #include <uapi/drm/xe_drm.h> #include "xe_dep_scheduler.h" #include "xe_device.h" #include "xe_gt.h" +#include "xe_gt_sriov_vf.h" #include "xe_hw_engine_class_sysfs.h" #include "xe_hw_engine_group.h" #include "xe_hw_fence.h" @@ -28,6 +30,29 @@ #include "xe_vm.h" #include "xe_pxp.h" +/** + * DOC: Execution Queue + * + * An Execution queue is an interface for the HW context of execution. + * The user creates an execution queue, submits the GPU jobs through those + * queues and in the end destroys them. + * + * Execution queues can also be created by XeKMD itself for driver internal + * operations like object migration etc. + * + * An execution queue is associated with a specified HW engine or a group of + * engines (belonging to the same tile and engine class) and any GPU job + * submitted on the queue will be run on one of these engines. + * + * An execution queue is tied to an address space (VM). It holds a reference + * of the associated VM and the underlying Logical Ring Context/s (LRC/s) + * until the queue is destroyed. + * + * The execution queue sits on top of the submission backend. It opaquely + * handles the GuC and Execlist backends whichever the platform uses, and + * the ring operations the different engine classes support. + */ + enum xe_exec_queue_sched_prop { XE_EXEC_QUEUE_JOB_TIMEOUT = 0, XE_EXEC_QUEUE_TIMESLICE = 1, @@ -160,7 +185,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, return q; } -static int __xe_exec_queue_init(struct xe_exec_queue *q) +static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags) { int i, err; u32 flags = 0; @@ -179,17 +204,37 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q) flags |= XE_LRC_CREATE_RUNALONE; } + if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL)) + flags |= XE_LRC_CREATE_USER_CTX; + + err = q->ops->init(q); + if (err) + return err; + + /* + * This must occur after q->ops->init to avoid race conditions during VF + * post-migration recovery, as the fixups for the LRC GGTT addresses + * depend on the queue being present in the backend tracking structure. 
+ * + * In addition to above, we must wait on inflight GGTT changes to avoid + * writing out stale values here. Such wait provides a solid solution + * (without a race) only if the function can detect migration instantly + * from the moment vCPU resumes execution. + */ for (i = 0; i < q->width; ++i) { - q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags); - if (IS_ERR(q->lrc[i])) { - err = PTR_ERR(q->lrc[i]); + struct xe_lrc *lrc; + + xe_gt_sriov_vf_wait_valid_ggtt(q->gt); + lrc = xe_lrc_create(q->hwe, q->vm, xe_lrc_ring_size(), + q->msix_vec, flags); + if (IS_ERR(lrc)) { + err = PTR_ERR(lrc); goto err_lrc; } - } - err = q->ops->init(q); - if (err) - goto err_lrc; + /* Pairs with READ_ONCE to xe_exec_queue_contexts_hwsp_rebase */ + WRITE_ONCE(q->lrc[i], lrc); + } return 0; @@ -225,7 +270,7 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v if (IS_ERR(q)) return q; - err = __xe_exec_queue_init(q); + err = __xe_exec_queue_init(q, flags); if (err) goto err_post_alloc; @@ -324,6 +369,16 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, } xe_vm_put(migrate_vm); + if (!IS_ERR(q)) { + int err = drm_syncobj_create(&q->ufence_syncobj, + DRM_SYNCOBJ_CREATE_SIGNALED, + NULL); + if (err) { + xe_exec_queue_put(q); + return ERR_PTR(err); + } + } + return q; } ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO); @@ -332,11 +387,20 @@ void xe_exec_queue_destroy(struct kref *ref) { struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount); struct xe_exec_queue *eq, *next; + int i; + + xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0); + + if (q->ufence_syncobj) + drm_syncobj_put(q->ufence_syncobj); if (xe_exec_queue_uses_pxp(q)) xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); xe_exec_queue_last_fence_put_unlocked(q); + for_each_tlb_inval(i) + xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i); + if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) { list_for_each_entry_safe(eq, next, &q->multi_gt_list, multi_gt_link) @@ -824,25 +888,6 @@ bool xe_exec_queue_is_lr(struct xe_exec_queue *q) !(q->flags & EXEC_QUEUE_FLAG_VM); } -static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q) -{ - return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1; -} - -/** - * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full - * @q: The exec_queue - * - * Return: True if the exec_queue's ring is full, false otherwise. - */ -bool xe_exec_queue_ring_full(struct xe_exec_queue *q) -{ - struct xe_lrc *lrc = q->lrc[0]; - s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES; - - return xe_exec_queue_num_job_inflight(q) >= max_job; -} - /** * xe_exec_queue_is_idle() - Whether an exec_queue is idle. 
* @q: The exec_queue @@ -973,7 +1018,9 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data, static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q, struct xe_vm *vm) { - if (q->flags & EXEC_QUEUE_FLAG_VM) { + if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) { + xe_migrate_job_lock_assert(q); + } else if (q->flags & EXEC_QUEUE_FLAG_VM) { lockdep_assert_held(&vm->lock); } else { xe_vm_assert_held(vm); @@ -1072,32 +1119,104 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm, struct dma_fence *fence) { xe_exec_queue_last_fence_lockdep_assert(q, vm); + xe_assert(vm->xe, !dma_fence_is_container(fence)); xe_exec_queue_last_fence_put(q, vm); q->last_fence = dma_fence_get(fence); } /** - * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue + * xe_exec_queue_tlb_inval_last_fence_put() - Drop ref to last TLB invalidation fence * @q: The exec queue - * @vm: The VM the engine does a bind or exec for + * @vm: The VM the engine does a bind for + * @type: Either primary or media GT + */ +void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q, + struct xe_vm *vm, + unsigned int type) +{ + xe_exec_queue_last_fence_lockdep_assert(q, vm); + xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || + type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT); + + xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type); +} + +/** + * xe_exec_queue_tlb_inval_last_fence_put_unlocked() - Drop ref to last TLB + * invalidation fence unlocked + * @q: The exec queue + * @type: Either primary or media GT * - * Returns: - * -ETIME if there exists an unsignalled last fence dependency, zero otherwise. + * Only safe to be called from xe_exec_queue_destroy(). */ -int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm) +void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q, + unsigned int type) +{ + xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || + type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT); + + dma_fence_put(q->tlb_inval[type].last_fence); + q->tlb_inval[type].last_fence = NULL; +} + +/** + * xe_exec_queue_tlb_inval_last_fence_get() - Get last fence for TLB invalidation + * @q: The exec queue + * @vm: The VM the engine does a bind for + * @type: Either primary or media GT + * + * Get last fence, takes a ref + * + * Returns: last fence if not signaled, dma fence stub if signaled + */ +struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q, + struct xe_vm *vm, + unsigned int type) { struct dma_fence *fence; - int err = 0; - fence = xe_exec_queue_last_fence_get(q, vm); - if (fence) { - err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 
- 0 : -ETIME; - dma_fence_put(fence); - } + xe_exec_queue_last_fence_lockdep_assert(q, vm); + xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || + type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT); + xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM | + EXEC_QUEUE_FLAG_MIGRATE)); - return err; + if (q->tlb_inval[type].last_fence && + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &q->tlb_inval[type].last_fence->flags)) + xe_exec_queue_tlb_inval_last_fence_put(q, vm, type); + + fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub(); + dma_fence_get(fence); + return fence; +} + +/** + * xe_exec_queue_tlb_inval_last_fence_set() - Set last fence for TLB invalidation + * @q: The exec queue + * @vm: The VM the engine does a bind for + * @fence: The fence + * @type: Either primary or media GT + * + * Set the last fence for the tlb invalidation type on the queue. Increases + * reference count for fence, when closing queue + * xe_exec_queue_tlb_inval_last_fence_put should be called. + */ +void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q, + struct xe_vm *vm, + struct dma_fence *fence, + unsigned int type) +{ + xe_exec_queue_last_fence_lockdep_assert(q, vm); + xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || + type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT); + xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM | + EXEC_QUEUE_FLAG_MIGRATE)); + xe_assert(vm->xe, !dma_fence_is_container(fence)); + + xe_exec_queue_tlb_inval_last_fence_put(q, vm, type); + q->tlb_inval[type].last_fence = dma_fence_get(fence); } /** @@ -1114,36 +1233,19 @@ int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch) int err = 0; for (i = 0; i < q->width; ++i) { - xe_lrc_update_memirq_regs_with_address(q->lrc[i], q->hwe, scratch); - xe_lrc_update_hwctx_regs_with_address(q->lrc[i]); - err = xe_lrc_setup_wa_bb_with_scratch(q->lrc[i], q->hwe, scratch); + struct xe_lrc *lrc; + + /* Pairs with WRITE_ONCE in __xe_exec_queue_init */ + lrc = READ_ONCE(q->lrc[i]); + if (!lrc) + continue; + + xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch); + xe_lrc_update_hwctx_regs_with_address(lrc); + err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch); if (err) break; } return err; } - -/** - * xe_exec_queue_jobs_ring_restore - Re-emit ring commands of requests pending on given queue. - * @q: the &xe_exec_queue struct instance - */ -void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q) -{ - struct xe_gpu_scheduler *sched = &q->guc->sched; - struct xe_sched_job *job; - - /* - * This routine is used within VF migration recovery. This means - * using the lock here introduces a restriction: we cannot wait - * for any GFX HW response while the lock is taken. 
- */ - spin_lock(&sched->base.job_list_lock); - list_for_each_entry(job, &sched->base.pending_list, drm.list) { - if (xe_sched_job_is_error(job)) - continue; - - q->ring_ops->emit_job(job); - } - spin_unlock(&sched->base.job_list_lock); -} diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h index 15ec852e7f7e..fda4d4f9bda8 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.h +++ b/drivers/gpu/drm/xe/xe_exec_queue.h @@ -14,6 +14,10 @@ struct drm_file; struct xe_device; struct xe_file; +#define for_each_tlb_inval(__i) \ + for (__i = XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT; \ + __i <= XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT; ++__i) + struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, u32 logical_mask, u16 width, struct xe_hw_engine *hw_engine, u32 flags, @@ -64,8 +68,6 @@ static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q) bool xe_exec_queue_is_lr(struct xe_exec_queue *q); -bool xe_exec_queue_ring_full(struct xe_exec_queue *q); - bool xe_exec_queue_is_idle(struct xe_exec_queue *q); void xe_exec_queue_kill(struct xe_exec_queue *q); @@ -86,13 +88,27 @@ struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue * struct xe_vm *vm); void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm, struct dma_fence *fence); -int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, - struct xe_vm *vm); + +void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q, + struct xe_vm *vm, + unsigned int type); + +void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q, + unsigned int type); + +struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q, + struct xe_vm *vm, + unsigned int type); + +void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q, + struct xe_vm *vm, + struct dma_fence *fence, + unsigned int type); + void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q); int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch); -void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q); - struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q); + #endif diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index 27b76cf9da89..771ffe35cd0c 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -15,6 +15,7 @@ #include "xe_hw_fence_types.h" #include "xe_lrc_types.h" +struct drm_syncobj; struct xe_execlist_exec_queue; struct xe_gt; struct xe_guc_exec_queue; @@ -145,6 +146,11 @@ struct xe_exec_queue { * dependency scheduler */ struct xe_dep_scheduler *dep_scheduler; + /** + * @last_fence: last fence for tlb invalidation, protected by + * vm->lock in write mode + */ + struct dma_fence *last_fence; } tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_COUNT]; /** @pxp: PXP info tracking */ @@ -155,6 +161,12 @@ struct xe_exec_queue { struct list_head link; } pxp; + /** @ufence_syncobj: User fence syncobj */ + struct drm_syncobj *ufence_syncobj; + + /** @ufence_timeline_value: User fence timeline value */ + u64 ufence_timeline_value; + /** @ops: submission backend exec queue operations */ const struct xe_exec_queue_ops *ops; @@ -162,6 +174,11 @@ struct xe_exec_queue { const struct xe_ring_ops *ring_ops; /** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */ struct drm_sched_entity *entity; + +#define XE_MAX_JOB_COUNT_PER_EXEC_QUEUE 1000 + /** @job_cnt: number of drm jobs in this exec queue */ + atomic_t job_cnt; + /** * 
@tlb_flush_seqno: The seqno of the last rebind tlb flush performed * Protected by @vm's resv. Unused if @vm == NULL. @@ -207,6 +224,9 @@ struct xe_exec_queue_ops { * call after suspend. In dma-fencing path thus must return within a * reasonable amount of time. -ETIME return shall indicate an error * waiting for suspend resulting in associated VM getting killed. + * -EAGAIN return indicates the wait should be tried again, if the wait + * is within a work item, the work item should be requeued as deadlock + * avoidance mechanism. */ int (*suspend_wait)(struct xe_exec_queue *q); /** diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index f83d421ac9d3..769d05517f93 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -339,7 +339,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q) const struct drm_sched_init_args args = { .ops = &drm_sched_ops, .num_rqs = 1, - .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, + .credit_limit = xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES, .hang_limit = XE_SCHED_HANG_LIMIT, .timeout = XE_SCHED_JOB_TIMEOUT, .name = q->hwe->name, diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h index 899fbbcb3ea9..14b7b86e801b 100644 --- a/drivers/gpu/drm/xe/xe_force_wake_types.h +++ b/drivers/gpu/drm/xe/xe_force_wake_types.h @@ -52,7 +52,22 @@ enum xe_force_wake_domains { }; /** - * struct xe_force_wake_domain - XE force wake domains + * struct xe_force_wake_domain - Xe force wake power domain + * + * Represents an individual device-internal power domain. The driver must + * ensure the power domain is awake before accessing registers or other + * hardware functionality that is part of the power domain. Since different + * driver threads may access hardware units simultaneously, a reference count + * is used to ensure that the domain remains awake as long as any software + * is using the part of the hardware covered by the power domain. + * + * Hardware provides a register interface to allow the driver to request + * wake/sleep of power domains, although in most cases the actual action of + * powering the hardware up/down is handled by firmware (and may be subject to + * requirements and constraints outside of the driver's visibility) so the + * driver needs to wait for an acknowledgment that a wake request has been + * acted upon before accessing the parts of the hardware that reside within the + * power domain. */ struct xe_force_wake_domain { /** @id: domain force wake id */ @@ -70,7 +85,14 @@ struct xe_force_wake_domain { }; /** - * struct xe_force_wake - XE force wake + * struct xe_force_wake - Xe force wake collection + * + * Represents a collection of related power domains (struct + * xe_force_wake_domain) associated with a subunit of the device. + * + * Currently only used for GT power domains (where the term "forcewake" is used + * in the hardware documentation), although the interface could be extended to + * power wells in other parts of the hardware in the future. 
*/ struct xe_force_wake { /** @gt: back pointers to GT */ diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 5edc0cad47e2..ef481b334af4 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -107,10 +107,23 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev) static void ggtt_update_access_counter(struct xe_ggtt *ggtt) { struct xe_tile *tile = ggtt->tile; - struct xe_gt *affected_gt = XE_GT_WA(tile->primary_gt, 22019338487) ? - tile->primary_gt : tile->media_gt; - struct xe_mmio *mmio = &affected_gt->mmio; - u32 max_gtt_writes = XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63; + struct xe_gt *affected_gt; + u32 max_gtt_writes; + + if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 22019338487)) { + affected_gt = tile->primary_gt; + max_gtt_writes = 1100; + + /* Only expected to apply to primary GT on dgpu platforms */ + xe_tile_assert(tile, IS_DGFX(tile_to_xe(tile))); + } else { + affected_gt = tile->media_gt; + max_gtt_writes = 63; + + /* Only expected to apply to media GT on igpu platforms */ + xe_tile_assert(tile, !IS_DGFX(tile_to_xe(tile))); + } + /* * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit * to wait for completion of prior GTT writes before letting this through. @@ -119,7 +132,7 @@ static void ggtt_update_access_counter(struct xe_ggtt *ggtt) lockdep_assert_held(&ggtt->lock); if ((++ggtt->access_count % max_gtt_writes) == 0) { - xe_mmio_write32(mmio, GMD_ID, 0x0); + xe_mmio_write32(&affected_gt->mmio, GMD_ID, 0x0); ggtt->access_count = 0; } } @@ -138,6 +151,14 @@ static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte) ggtt_update_access_counter(ggtt); } +static u64 xe_ggtt_get_pte(struct xe_ggtt *ggtt, u64 addr) +{ + xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK)); + xe_tile_assert(ggtt->tile, addr < ggtt->size); + + return readq(&ggtt->gsm[addr >> XE_PTE_SHIFT]); +} + static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size) { u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB]; @@ -159,6 +180,16 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size) } } +static void primelockdep(struct xe_ggtt *ggtt) +{ + if (!IS_ENABLED(CONFIG_LOCKDEP)) + return; + + fs_reclaim_acquire(GFP_KERNEL); + might_lock(&ggtt->lock); + fs_reclaim_release(GFP_KERNEL); +} + /** * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile * @tile: &xe_tile @@ -169,9 +200,19 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size) */ struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile) { - struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL); - if (ggtt) - ggtt->tile = tile; + struct xe_device *xe = tile_to_xe(tile); + struct xe_ggtt *ggtt; + + ggtt = drmm_kzalloc(&xe->drm, sizeof(*ggtt), GFP_KERNEL); + if (!ggtt) + return NULL; + + if (drmm_mutex_init(&xe->drm, &ggtt->lock)) + return NULL; + + primelockdep(ggtt); + ggtt->tile = tile; + return ggtt; } @@ -180,7 +221,6 @@ static void ggtt_fini_early(struct drm_device *drm, void *arg) struct xe_ggtt *ggtt = arg; destroy_workqueue(ggtt->wq); - mutex_destroy(&ggtt->lock); drm_mm_takedown(&ggtt->mm); } @@ -198,37 +238,28 @@ void xe_ggtt_might_lock(struct xe_ggtt *ggtt) } #endif -static void primelockdep(struct xe_ggtt *ggtt) -{ - if (!IS_ENABLED(CONFIG_LOCKDEP)) - return; - - fs_reclaim_acquire(GFP_KERNEL); - might_lock(&ggtt->lock); - fs_reclaim_release(GFP_KERNEL); -} - static const struct xe_ggtt_pt_ops xelp_pt_ops = { .pte_encode_flags = xelp_ggtt_pte_flags, 
.ggtt_set_pte = xe_ggtt_set_pte, + .ggtt_get_pte = xe_ggtt_get_pte, }; static const struct xe_ggtt_pt_ops xelpg_pt_ops = { .pte_encode_flags = xelpg_ggtt_pte_flags, .ggtt_set_pte = xe_ggtt_set_pte, + .ggtt_get_pte = xe_ggtt_get_pte, }; static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = { .pte_encode_flags = xelpg_ggtt_pte_flags, .ggtt_set_pte = xe_ggtt_set_pte_and_flush, + .ggtt_get_pte = xe_ggtt_get_pte, }; static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved) { drm_mm_init(&ggtt->mm, reserved, ggtt->size - reserved); - mutex_init(&ggtt->lock); - primelockdep(ggtt); } int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size) @@ -284,10 +315,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) ggtt->size = GUC_GGTT_TOP; if (GRAPHICS_VERx100(xe) >= 1270) - ggtt->pt_ops = (ggtt->tile->media_gt && - XE_GT_WA(ggtt->tile->media_gt, 22019338487)) || - XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ? - &xelpg_pt_wa_ops : &xelpg_pt_ops; + ggtt->pt_ops = + (ggtt->tile->media_gt && XE_GT_WA(ggtt->tile->media_gt, 22019338487)) || + (ggtt->tile->primary_gt && XE_GT_WA(ggtt->tile->primary_gt, 22019338487)) ? + &xelpg_pt_wa_ops : &xelpg_pt_ops; else ggtt->pt_ops = &xelp_pt_ops; @@ -678,6 +709,20 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node) } /** + * xe_ggtt_node_pt_size() - Get the size of page table entries needed to map a GGTT node. + * @node: the &xe_ggtt_node + * + * Return: GGTT node page table entries size in bytes. + */ +size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node) +{ + if (!node) + return 0; + + return node->base.size / XE_PAGE_SIZE * sizeof(u64); +} + +/** * xe_ggtt_map_bo - Map the BO into GGTT * @ggtt: the &xe_ggtt where node will be mapped * @node: the &xe_ggtt_node where this BO is mapped @@ -910,6 +955,85 @@ void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid) xe_ggtt_assign_locked(node->ggtt, &node->base, vfid); mutex_unlock(&node->ggtt->lock); } + +/** + * xe_ggtt_node_save() - Save a &xe_ggtt_node to a buffer. + * @node: the &xe_ggtt_node to be saved + * @dst: destination buffer + * @size: destination buffer size in bytes + * @vfid: VF identifier + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid) +{ + struct xe_ggtt *ggtt; + u64 start, end; + u64 *buf = dst; + u64 pte; + + if (!node) + return -ENOENT; + + guard(mutex)(&node->ggtt->lock); + + if (xe_ggtt_node_pt_size(node) != size) + return -EINVAL; + + ggtt = node->ggtt; + start = node->base.start; + end = start + node->base.size - 1; + + while (start < end) { + pte = ggtt->pt_ops->ggtt_get_pte(ggtt, start); + if (vfid != u64_get_bits(pte, GGTT_PTE_VFID)) + return -EPERM; + + *buf++ = u64_replace_bits(pte, 0, GGTT_PTE_VFID); + start += XE_PAGE_SIZE; + } + + return 0; +} + +/** + * xe_ggtt_node_load() - Load a &xe_ggtt_node from a buffer. + * @node: the &xe_ggtt_node to be loaded + * @src: source buffer + * @size: source buffer size in bytes + * @vfid: VF identifier + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid) +{ + u64 vfid_pte = xe_encode_vfid_pte(vfid); + const u64 *buf = src; + struct xe_ggtt *ggtt; + u64 start, end; + + if (!node) + return -ENOENT; + + guard(mutex)(&node->ggtt->lock); + + if (xe_ggtt_node_pt_size(node) != size) + return -EINVAL; + + ggtt = node->ggtt; + start = node->base.start; + end = start + node->base.size - 1; + + while (start < end) { + vfid_pte = u64_replace_bits(*buf++, vfid, GGTT_PTE_VFID); + ggtt->pt_ops->ggtt_set_pte(ggtt, start, vfid_pte); + start += XE_PAGE_SIZE; + } + xe_ggtt_invalidate(ggtt); + + return 0; +} + #endif /** diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h index 75fc7a1efea7..93fea4b6079c 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.h +++ b/drivers/gpu/drm/xe/xe_ggtt.h @@ -29,6 +29,7 @@ int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node, u32 size, u32 align, u32 mm_flags); void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate); bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node); +size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node); void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node, struct xe_bo *bo, u16 pat_index); void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo); @@ -43,6 +44,8 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer #ifdef CONFIG_PCI_IOV void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid); +int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid); +int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid); #endif #ifndef CONFIG_LOCKDEP diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h index c5e999d58ff2..dacd796f8184 100644 --- a/drivers/gpu/drm/xe/xe_ggtt_types.h +++ b/drivers/gpu/drm/xe/xe_ggtt_types.h @@ -78,6 +78,8 @@ struct xe_ggtt_pt_ops { u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index); /** @ggtt_set_pte: Directly write into GGTT's PTE */ void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte); + /** @ggtt_get_pte: Directly read from GGTT's PTE */ + u64 (*ggtt_get_pte)(struct xe_ggtt *ggtt, u64 addr); }; #endif diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c index 455ccaf17314..f91e06d03511 100644 --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c @@ -101,19 +101,6 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched) cancel_work_sync(&sched->work_process_msg); } -/** - * xe_sched_submission_stop_async - Stop further runs of submission tasks on a scheduler. - * @sched: the &xe_gpu_scheduler struct instance - * - * This call disables further runs of scheduling work queue. It does not wait - * for any in-progress runs to finish, only makes sure no further runs happen - * afterwards. 
- */ -void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched) -{ - drm_sched_wqueue_stop(&sched->base); -} - void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched) { drm_sched_resume_timeout(&sched->base, sched->base.timeout); @@ -135,3 +122,17 @@ void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched, list_add_tail(&msg->link, &sched->msgs); xe_sched_process_msg_queue(sched); } + +/** + * xe_sched_add_msg_head() - Xe GPU scheduler add message to head of list + * @sched: Xe GPU scheduler + * @msg: Message to add + */ +void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched, + struct xe_sched_msg *msg) +{ + lockdep_assert_held(&sched->base.job_list_lock); + + list_add(&msg->link, &sched->msgs); + xe_sched_process_msg_queue(sched); +} diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h index e548b2aed95a..9955397aaaa9 100644 --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h @@ -7,7 +7,7 @@ #define _XE_GPU_SCHEDULER_H_ #include "xe_gpu_scheduler_types.h" -#include "xe_sched_job_types.h" +#include "xe_sched_job.h" int xe_sched_init(struct xe_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, @@ -21,7 +21,6 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched); void xe_sched_submission_start(struct xe_gpu_scheduler *sched); void xe_sched_submission_stop(struct xe_gpu_scheduler *sched); -void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched); void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched); @@ -29,6 +28,8 @@ void xe_sched_add_msg(struct xe_gpu_scheduler *sched, struct xe_sched_msg *msg); void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched, struct xe_sched_msg *msg); +void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched, + struct xe_sched_msg *msg); static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched) { @@ -58,7 +59,8 @@ static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched) struct drm_sched_fence *s_fence = s_job->s_fence; struct dma_fence *hw_fence = s_fence->parent; - if (hw_fence && !dma_fence_is_signaled(hw_fence)) + if (to_xe_sched_job(s_job)->skip_emit || + (hw_fence && !dma_fence_is_signaled(hw_fence))) sched->base.ops->run_job(s_job); } } @@ -77,17 +79,30 @@ static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched, spin_unlock(&sched->base.job_list_lock); } +/** + * xe_sched_first_pending_job() - Find first pending job which is unsignaled + * @sched: Xe GPU scheduler + * + * Return first unsignaled job in pending list or NULL + */ static inline struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched) { - struct xe_sched_job *job; + struct xe_sched_job *job, *r_job = NULL; spin_lock(&sched->base.job_list_lock); - job = list_first_entry_or_null(&sched->base.pending_list, - struct xe_sched_job, drm.list); + list_for_each_entry(job, &sched->base.pending_list, drm.list) { + struct drm_sched_fence *s_fence = job->drm.s_fence; + struct dma_fence *hw_fence = s_fence->parent; + + if (hw_fence && !dma_fence_is_signaled(hw_fence)) { + r_job = job; + break; + } + } spin_unlock(&sched->base.job_list_lock); - return job; + return r_job; } static inline int diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index 83d61bf8ec62..dd69cb834f8e 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -266,7 +266,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc) unsigned int fw_ref; int ret; - if (XE_GT_WA(tile->primary_gt, 
14018094691)) { + fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); /* @@ -281,7 +281,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc) ret = gsc_upload(gsc); - if (XE_GT_WA(tile->primary_gt, 14018094691)) + if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 14018094691)) xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref); if (ret) diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 6d3db5e55d98..dbb5e7a9bc6a 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -32,7 +32,6 @@ #include "xe_gt_freq.h" #include "xe_gt_idle.h" #include "xe_gt_mcr.h" -#include "xe_gt_pagefault.h" #include "xe_gt_printk.h" #include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_vf.h" @@ -49,6 +48,7 @@ #include "xe_map.h" #include "xe_migrate.h" #include "xe_mmio.h" +#include "xe_pagefault.h" #include "xe_pat.h" #include "xe_pm.h" #include "xe_mocs.h" @@ -65,29 +65,29 @@ #include "xe_wa.h" #include "xe_wopcm.h" -static void gt_fini(struct drm_device *drm, void *arg) -{ - struct xe_gt *gt = arg; - - destroy_workqueue(gt->ordered_wq); -} - struct xe_gt *xe_gt_alloc(struct xe_tile *tile) { + struct xe_device *xe = tile_to_xe(tile); + struct drm_device *drm = &xe->drm; + bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt && + IS_SRIOV_VF(xe); + struct workqueue_struct *ordered_wq; struct xe_gt *gt; - int err; - gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL); + gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL); if (!gt) return ERR_PTR(-ENOMEM); gt->tile = tile; - gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", - WQ_MEM_RECLAIM); + if (shared_wq && tile->primary_gt->ordered_wq) + ordered_wq = tile->primary_gt->ordered_wq; + else + ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq", + WQ_MEM_RECLAIM); + if (IS_ERR(ordered_wq)) + return ERR_CAST(ordered_wq); - err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt); - if (err) - return ERR_PTR(err); + gt->ordered_wq = ordered_wq; return gt; } @@ -398,6 +398,12 @@ int xe_gt_init_early(struct xe_gt *gt) return err; } + if (IS_SRIOV_VF(gt_to_xe(gt))) { + err = xe_gt_sriov_vf_init_early(gt); + if (err) + return err; + } + xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt)); err = xe_wa_gt_init(gt); @@ -583,10 +589,8 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt) if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt)) xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt); - if (IS_SRIOV_PF(gt_to_xe(gt))) { - xe_gt_sriov_pf_init(gt); + if (IS_SRIOV_PF(gt_to_xe(gt))) xe_gt_sriov_pf_init_hw(gt); xe_force_wake_put(gt_to_fw(gt), fw_ref); @@ -603,6 +607,13 @@ static void xe_gt_fini(void *arg) struct xe_gt *gt = arg; int i; + if (disable_work_sync(&gt->reset.worker)) + /* + * If gt_reset_worker was halted from executing, take care of + * releasing the rpm reference here. 
+ */ + xe_pm_runtime_put(gt_to_xe(gt)); + for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(&gt->fence_irq[i]); @@ -633,10 +644,6 @@ int xe_gt_init(struct xe_gt *gt) if (err) return err; - err = xe_gt_pagefault_init(gt); - if (err) - return err; - err = xe_gt_idle_init(&gt->gtidle); if (err) return err; @@ -657,6 +664,12 @@ if (err) return err; + if (IS_SRIOV_VF(gt_to_xe(gt))) { + err = xe_gt_sriov_vf_init(gt); + if (err) + return err; + } + return 0; } @@ -803,33 +816,21 @@ static int do_gt_restart(struct xe_gt *gt) return 0; } -static int gt_wait_reset_unblock(struct xe_gt *gt) -{ - return xe_guc_wait_reset_unblock(&gt->uc.guc); -} - -static int gt_reset(struct xe_gt *gt) +static void gt_reset_worker(struct work_struct *w) { + struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker); unsigned int fw_ref; int err; - if (xe_device_wedged(gt_to_xe(gt))) { - err = -ECANCELED; + if (xe_device_wedged(gt_to_xe(gt))) goto err_pm_put; - } /* We only support GT resets with GuC submission */ - if (!xe_device_uc_enabled(gt_to_xe(gt))) { - err = -ENODEV; + if (!xe_device_uc_enabled(gt_to_xe(gt))) goto err_pm_put; - } xe_gt_info(gt, "reset started\n"); - err = gt_wait_reset_unblock(gt); - if (!err) - xe_gt_warn(gt, "reset block failed to get lifted"); - if (xe_fault_inject_gt_reset()) { err = -ECANCELED; goto err_fail; @@ -848,7 +849,7 @@ static int gt_reset(struct xe_gt *gt) xe_uc_gucrc_disable(&gt->uc); xe_uc_stop_prepare(&gt->uc); - xe_gt_pagefault_reset(gt); + xe_pagefault_reset(gt_to_xe(gt), gt); xe_uc_stop(&gt->uc); @@ -863,30 +864,23 @@ goto err_out; xe_force_wake_put(gt_to_fw(gt), fw_ref); + + /* Pair with get while enqueueing the work in xe_gt_reset_async() */ xe_pm_runtime_put(gt_to_xe(gt)); xe_gt_info(gt, "reset done\n"); - return 0; + return; err_out: xe_force_wake_put(gt_to_fw(gt), fw_ref); XE_WARN_ON(xe_uc_start(&gt->uc)); + err_fail: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); - xe_device_declare_wedged(gt_to_xe(gt)); err_pm_put: xe_pm_runtime_put(gt_to_xe(gt)); - - return err; -} - -static void gt_reset_worker(struct work_struct *w) -{ - struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker); - - gt_reset(gt); } void xe_gt_reset_async(struct xe_gt *gt) @@ -898,6 +892,8 @@ return; xe_gt_info(gt, "reset queued\n"); + + /* Pair with put in gt_reset_worker() if work is enqueued */ xe_pm_runtime_get_noresume(gt_to_xe(gt)); if (!queue_work(gt->ordered_wq, &gt->reset.worker)) xe_pm_runtime_put(gt_to_xe(gt)); diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index 41880979f4de..9d710049da45 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -12,6 +12,7 @@ #include "xe_device.h" #include "xe_device_types.h" +#include "xe_gt_sriov_vf.h" #include "xe_hw_engine.h" #define for_each_hw_engine(hwe__, gt__, id__) \ @@ -21,6 +22,12 @@ #define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0) +#define GT_VER(gt) ({ \ + typeof(gt) gt_ = (gt); \ + struct xe_device *xe = gt_to_xe(gt_); \ + xe_gt_is_media_type(gt_) ? 
MEDIA_VER(xe) : GRAPHICS_VER(xe); \ +}) + extern struct fault_attr gt_reset_failure; static inline bool xe_fault_inject_gt_reset(void) { @@ -124,4 +131,16 @@ static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe) hwe->instance == gt->usm.reserved_bcs_instance; } +/** + * xe_gt_recovery_pending() - GT recovery pending + * @gt: the &xe_gt + * + * Return: True if GT recovery in pending, False otherwise + */ +static inline bool xe_gt_recovery_pending(struct xe_gt *gt) +{ + return IS_SRIOV_VF(gt_to_xe(gt)) && + xe_gt_sriov_vf_recovery_pending(gt); +} + #endif diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c index 4f011d1573c6..bfc25c46f798 100644 --- a/drivers/gpu/drm/xe/xe_gt_clock.c +++ b/drivers/gpu/drm/xe/xe_gt_clock.c @@ -55,30 +55,11 @@ static void read_crystal_clock(struct xe_gt *gt, u32 rpm_config_reg, u32 *freq, } } -static void check_ctc_mode(struct xe_gt *gt) -{ - /* - * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later - * platforms. In theory it could be a valid setting for pre-Xe2 - * platforms, but there's no documentation on how to properly handle - * this case. Reading TIMESTAMP_OVERRIDE, as the driver attempted in - * the past has been confirmed as incorrect by the hardware architects. - * - * For now just warn if we ever encounter hardware in the wild that - * has this setting and move on as if it hadn't been set. - */ - if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC) - xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n"); -} - int xe_gt_clock_init(struct xe_gt *gt) { u32 freq; u32 c0; - if (!IS_SRIOV_VF(gt_to_xe(gt))) - check_ctc_mode(gt); - c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); read_crystal_clock(gt, c0, &freq, &gt->info.timestamp_base); @@ -93,11 +74,6 @@ int xe_gt_clock_init(struct xe_gt *gt) return 0; } -static u64 div_u64_roundup(u64 n, u32 d) -{ - return div_u64(n + d - 1, d); -} - /** * xe_gt_clock_interval_to_ms - Convert sampled GT clock ticks to msec * @@ -108,5 +84,5 @@ static u64 div_u64_roundup(u64 n, u32 d) */ u64 xe_gt_clock_interval_to_ms(struct xe_gt *gt, u64 count) { - return div_u64_roundup(count * MSEC_PER_SEC, gt->info.reference_clock); + return mul_u64_u32_div(count, MSEC_PER_SEC, gt->info.reference_clock); } diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index f253e2df4907..e4fd632f43cf 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -12,7 +12,6 @@ #include "xe_device.h" #include "xe_force_wake.h" -#include "xe_ggtt.h" #include "xe_gt.h" #include "xe_gt_mcr.h" #include "xe_gt_idle.h" @@ -36,6 +35,11 @@ #include "xe_uc_debugfs.h" #include "xe_wa.h" +static struct xe_gt *node_to_gt(struct drm_info_node *node) +{ + return node->dent->d_parent->d_inode->i_private; +} + /** * xe_gt_debugfs_simple_show - A show callback for struct drm_info_list * @m: the &seq_file * @data: data used by the drm debugfs helpers * @@ -78,8 +82,7 @@ int xe_gt_debugfs_simple_show(struct seq_file *m, void *data) { struct drm_printer p = drm_seq_file_printer(m); struct drm_info_node *node = m->private; - struct dentry *parent = node->dent->d_parent; - struct xe_gt *gt = parent->d_inode->i_private; + struct xe_gt *gt = node_to_gt(node); int (*print)(struct xe_gt *, struct drm_printer *) = node->info_ent->data; if (WARN_ON(!print)) @@ -88,15 +91,36 @@ return print(gt, &p); } -static int hw_engines(struct xe_gt *gt, struct drm_printer *p) +/** + * xe_gt_debugfs_show_with_rpm - A 
show callback for struct drm_info_list + * @m: the &seq_file + * @data: data used by the drm debugfs helpers + * + * Similar to xe_gt_debugfs_simple_show() but implicitly takes a RPM ref. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_debugfs_show_with_rpm(struct seq_file *m, void *data) { + struct drm_info_node *node = m->private; + struct xe_gt *gt = node_to_gt(node); struct xe_device *xe = gt_to_xe(gt); + int ret; + + xe_pm_runtime_get(xe); + ret = xe_gt_debugfs_simple_show(m, data); + xe_pm_runtime_put(xe); + + return ret; +} + +static int hw_engines(struct xe_gt *gt, struct drm_printer *p) +{ struct xe_hw_engine *hwe; enum xe_hw_engine_id id; unsigned int fw_ref; int ret = 0; - xe_pm_runtime_get(xe); fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { ret = -ETIMEDOUT; @@ -108,58 +132,21 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p) fw_put: xe_force_wake_put(gt_to_fw(gt), fw_ref); - xe_pm_runtime_put(xe); - - return ret; -} - -static int powergate_info(struct xe_gt *gt, struct drm_printer *p) -{ - int ret; - - xe_pm_runtime_get(gt_to_xe(gt)); - ret = xe_gt_idle_pg_print(gt, p); - xe_pm_runtime_put(gt_to_xe(gt)); return ret; } -static int topology(struct xe_gt *gt, struct drm_printer *p) -{ - xe_pm_runtime_get(gt_to_xe(gt)); - xe_gt_topology_dump(gt, p); - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - static int steering(struct xe_gt *gt, struct drm_printer *p) { - xe_pm_runtime_get(gt_to_xe(gt)); xe_gt_mcr_steering_dump(gt, p); - xe_pm_runtime_put(gt_to_xe(gt)); - return 0; } -static int ggtt(struct xe_gt *gt, struct drm_printer *p) -{ - int ret; - - xe_pm_runtime_get(gt_to_xe(gt)); - ret = xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, p); - xe_pm_runtime_put(gt_to_xe(gt)); - - return ret; -} - static int register_save_restore(struct xe_gt *gt, struct drm_printer *p) { struct xe_hw_engine *hwe; enum xe_hw_engine_id id; - xe_pm_runtime_get(gt_to_xe(gt)); - xe_reg_sr_dump(&gt->reg_sr, p); drm_printf(p, "\n"); @@ -177,98 +164,42 @@ static int register_save_restore(struct xe_gt *gt, struct drm_printer *p) for_each_hw_engine(hwe, gt, id) xe_reg_whitelist_dump(&hwe->reg_whitelist, p); - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - -static int workarounds(struct xe_gt *gt, struct drm_printer *p) -{ - xe_pm_runtime_get(gt_to_xe(gt)); - xe_wa_dump(gt, p); - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - -static int tunings(struct xe_gt *gt, struct drm_printer *p) -{ - xe_pm_runtime_get(gt_to_xe(gt)); - xe_tuning_dump(gt, p); - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - -static int pat(struct xe_gt *gt, struct drm_printer *p) -{ - xe_pm_runtime_get(gt_to_xe(gt)); - xe_pat_dump(gt, p); - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - -static int mocs(struct xe_gt *gt, struct drm_printer *p) -{ - xe_pm_runtime_get(gt_to_xe(gt)); - xe_mocs_dump(gt, p); - xe_pm_runtime_put(gt_to_xe(gt)); - return 0; } static int rcs_default_lrc(struct xe_gt *gt, struct drm_printer *p) { - xe_pm_runtime_get(gt_to_xe(gt)); xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_RENDER); - xe_pm_runtime_put(gt_to_xe(gt)); - return 0; } static int ccs_default_lrc(struct xe_gt *gt, struct drm_printer *p) { - xe_pm_runtime_get(gt_to_xe(gt)); xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COMPUTE); - xe_pm_runtime_put(gt_to_xe(gt)); - return 0; } static int bcs_default_lrc(struct xe_gt *gt, struct drm_printer *p) { - xe_pm_runtime_get(gt_to_xe(gt)); xe_lrc_dump_default(p, gt, 
XE_ENGINE_CLASS_COPY); - xe_pm_runtime_put(gt_to_xe(gt)); - return 0; } static int vcs_default_lrc(struct xe_gt *gt, struct drm_printer *p) { - xe_pm_runtime_get(gt_to_xe(gt)); xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_DECODE); - xe_pm_runtime_put(gt_to_xe(gt)); - return 0; } static int vecs_default_lrc(struct xe_gt *gt, struct drm_printer *p) { - xe_pm_runtime_get(gt_to_xe(gt)); xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_ENHANCE); - xe_pm_runtime_put(gt_to_xe(gt)); - return 0; } static int hwconfig(struct xe_gt *gt, struct drm_printer *p) { - xe_pm_runtime_get(gt_to_xe(gt)); xe_guc_hwconfig_dump(>->uc.guc, p); - xe_pm_runtime_put(gt_to_xe(gt)); - return 0; } @@ -278,26 +209,26 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p) * - without access to the PF specific data */ static const struct drm_info_list vf_safe_debugfs_list[] = { - {"topology", .show = xe_gt_debugfs_simple_show, .data = topology}, - {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt}, - {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore}, - {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds}, - {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings}, - {"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc}, - {"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc}, - {"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc}, - {"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc}, - {"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc}, - {"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig}, + { "topology", .show = xe_gt_debugfs_show_with_rpm, .data = xe_gt_topology_dump }, + { "register-save-restore", + .show = xe_gt_debugfs_show_with_rpm, .data = register_save_restore }, + { "workarounds", .show = xe_gt_debugfs_show_with_rpm, .data = xe_wa_gt_dump }, + { "tunings", .show = xe_gt_debugfs_show_with_rpm, .data = xe_tuning_dump }, + { "default_lrc_rcs", .show = xe_gt_debugfs_show_with_rpm, .data = rcs_default_lrc }, + { "default_lrc_ccs", .show = xe_gt_debugfs_show_with_rpm, .data = ccs_default_lrc }, + { "default_lrc_bcs", .show = xe_gt_debugfs_show_with_rpm, .data = bcs_default_lrc }, + { "default_lrc_vcs", .show = xe_gt_debugfs_show_with_rpm, .data = vcs_default_lrc }, + { "default_lrc_vecs", .show = xe_gt_debugfs_show_with_rpm, .data = vecs_default_lrc }, + { "hwconfig", .show = xe_gt_debugfs_show_with_rpm, .data = hwconfig }, }; /* everything else should be added here */ static const struct drm_info_list pf_only_debugfs_list[] = { - {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines}, - {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs}, - {"pat", .show = xe_gt_debugfs_simple_show, .data = pat}, - {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info}, - {"steering", .show = xe_gt_debugfs_simple_show, .data = steering}, + { "hw_engines", .show = xe_gt_debugfs_show_with_rpm, .data = hw_engines }, + { "mocs", .show = xe_gt_debugfs_show_with_rpm, .data = xe_mocs_dump }, + { "pat", .show = xe_gt_debugfs_show_with_rpm, .data = xe_pat_dump }, + { "powergate_info", .show = xe_gt_debugfs_show_with_rpm, .data = xe_gt_idle_pg_print }, + { "steering", .show = xe_gt_debugfs_show_with_rpm, .data = steering }, }; static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t *ppos, diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.h 
b/drivers/gpu/drm/xe/xe_gt_debugfs.h index 05a6cc93c78c..32ee3264051b 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.h +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.h @@ -11,5 +11,6 @@ struct xe_gt; void xe_gt_debugfs_register(struct xe_gt *gt); int xe_gt_debugfs_simple_show(struct seq_file *m, void *data); +int xe_gt_debugfs_show_with_rpm(struct seq_file *m, void *data); #endif diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c index 4ff1b6b58d6b..849ea6c86e8e 100644 --- a/drivers/gpu/drm/xe/xe_gt_freq.c +++ b/drivers/gpu/drm/xe/xe_gt_freq.c @@ -29,24 +29,26 @@ * PCODE is the ultimate decision maker of the actual running frequency, based * on thermal and other running conditions. * - * Xe's Freq provides a sysfs API for frequency management: + * Xe's Freq provides a sysfs API for frequency management under + * ``<device>/tile#/gt#/freq0/`` directory. * - * device/tile#/gt#/freq0/<item>_freq *read-only* files: + * **Read-only** attributes: * - * - act_freq: The actual resolved frequency decided by PCODE. - * - cur_freq: The current one requested by GuC PC to the PCODE. - * - rpn_freq: The Render Performance (RP) N level, which is the minimal one. - * - rpa_freq: The Render Performance (RP) A level, which is the achiveable one. - * Calculated by PCODE at runtime based on multiple running conditions - * - rpe_freq: The Render Performance (RP) E level, which is the efficient one. - * Calculated by PCODE at runtime based on multiple running conditions - * - rp0_freq: The Render Performance (RP) 0 level, which is the maximum one. + * - ``act_freq``: The actual resolved frequency decided by PCODE. + * - ``cur_freq``: The current one requested by GuC PC to the PCODE. + * - ``rpn_freq``: The Render Performance (RP) N level, which is the minimal one. + * - ``rpa_freq``: The Render Performance (RP) A level, which is the achievable one. + * Calculated by PCODE at runtime based on multiple running conditions + * - ``rpe_freq``: The Render Performance (RP) E level, which is the efficient one. + * Calculated by PCODE at runtime based on multiple running conditions + * - ``rp0_freq``: The Render Performance (RP) 0 level, which is the maximum one. * - * device/tile#/gt#/freq0/<item>_freq *read-write* files: + * **Read-write** attributes: * - * - min_freq: Min frequency request. - * - max_freq: Max frequency request. - * If max <= min, then freq_min becomes a fixed frequency request. + * - ``min_freq``: Min frequency request. + * - ``max_freq``: Max frequency request. + * If max <= min, then freq_min becomes a fixed frequency + * request. 
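+ *
+ * As a hedged illustration only (the sysfs root and the card/tile/gt indices
+ * below are assumptions that depend on the actual system), the attributes can
+ * be accessed like any other sysfs file from userspace::
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		char buf[32];
+ *		FILE *f = fopen("/sys/class/drm/card0/device/tile0/gt0/freq0/act_freq", "r");
+ *
+ *		if (f && fgets(buf, sizeof(buf), f))
+ *			printf("act_freq: %s", buf);
+ *		if (f)
+ *			fclose(f);
+ *		return 0;
+ *	}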
*/ static struct xe_guc_pc * @@ -99,13 +101,8 @@ static ssize_t rp0_freq_show(struct kobject *kobj, { struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); - u32 freq; - xe_pm_runtime_get(dev_to_xe(dev)); - freq = xe_guc_pc_get_rp0_freq(pc); - xe_pm_runtime_put(dev_to_xe(dev)); - - return sysfs_emit(buf, "%d\n", freq); + return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rp0_freq(pc)); } static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq); diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index 8fb1cae91724..164010860664 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -169,6 +169,15 @@ static const struct xe_mmio_range xelpg_dss_steering_table[] = { {}, }; +static const struct xe_mmio_range xe3p_xpc_xecore_steering_table[] = { + { 0x008140, 0x00817F }, /* SLICE, XeCore, SLICE */ + { 0x009480, 0x00955F }, /* SLICE, XeCore */ + { 0x00D800, 0x00D87F }, /* SLICE */ + { 0x00DC00, 0x00E9FF }, /* SLICE, rsvd, XeCore, rsvd, XeCore, rsvd, XeCore */ + { 0x013000, 0x0135FF }, /* XeCore, SLICE */ + {}, +}; + static const struct xe_mmio_range xelpmp_oaddrm_steering_table[] = { { 0x393200, 0x39323F }, { 0x393400, 0x3934FF }, @@ -236,21 +245,60 @@ static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = { }; static const struct xe_mmio_range xe3lpm_instance0_steering_table[] = { - { 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */ + { 0x384000, 0x3841FF }, /* GAM */ + { 0x384400, 0x3847DF }, /* GAM */ { 0x384900, 0x384AFF }, /* GAM */ { 0x389560, 0x3895FF }, /* MEDIAINF */ { 0x38B600, 0x38B8FF }, /* L3BANK */ { 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */ - { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, GAM */ + { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, rsvd, GAM */ { 0x393C00, 0x393C7F }, /* MEDIAINF */ {}, }; +/* + * Different "GAM" ranges have different rules; GAMWKRS, STLB, and GAMREQSTRM + * range subtypes need to be steered to (1,0), while all other GAM subtypes + * are steered to (0,0) and are included in the "INSTANCE0" table farther + * down. 
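+ *
+ * For example, an MCR register falling in the 0x004000-0x004AFF range listed
+ * below is steered through the GAM1 entry to (group 1, instance 0), while one
+ * in the 0x00C800-0x00CFFF GAMCTRL range is covered by the INSTANCE0 table
+ * and keeps the (0, 0) steering.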
+ */ +static const struct xe_mmio_range xe3p_xpc_gam_grp1_steering_table[] = { + { 0x004000, 0x004AFF }, /* GAMREQSTRM, rsvd, STLB, GAMWKRS, GAMREQSTRM */ + { 0x00F100, 0x00FFFF }, /* GAMWKRS */ + {}, +}; + +static const struct xe_mmio_range xe3p_xpc_node_steering_table[] = { + { 0x00B000, 0x00B0FF }, + { 0x00D880, 0x00D8FF }, + {}, +}; + +static const struct xe_mmio_range xe3p_xpc_instance0_steering_table[] = { + { 0x00B500, 0x00B6FF }, /* PSMI */ + { 0x00C800, 0x00CFFF }, /* GAMCTRL */ + { 0x00F000, 0x00F0FF }, /* GAMCTRL */ + {}, +}; + static void init_steering_l3bank(struct xe_gt *gt) { + struct xe_device *xe = gt_to_xe(gt); struct xe_mmio *mmio = >->mmio; - if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) { + if (GRAPHICS_VER(xe) >= 35) { + unsigned int first_bank = xe_l3_bank_mask_ffs(gt->fuse_topo.l3_bank_mask); + const int banks_per_node = 4; + unsigned int node = first_bank / banks_per_node; + + /* L3BANK ranges place node in grpID, bank in instanceid */ + gt->steering[L3BANK].group_target = node; + gt->steering[L3BANK].instance_target = first_bank % banks_per_node; + + /* NODE ranges split the node across grpid and instanceid */ + gt->steering[NODE].group_target = node >> 1; + gt->steering[NODE].instance_target = node & 1; + } else if (GRAPHICS_VERx100(xe) >= 1270) { u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK, xe_mmio_read32(mmio, MIRROR_FUSE3)); u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK, @@ -263,7 +311,7 @@ static void init_steering_l3bank(struct xe_gt *gt) gt->steering[L3BANK].group_target = __ffs(mslice_mask); gt->steering[L3BANK].instance_target = bank_mask & BIT(0) ? 0 : 2; - } else if (gt_to_xe(gt)->info.platform == XE_DG2) { + } else if (xe->info.platform == XE_DG2) { u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK, xe_mmio_read32(mmio, MIRROR_FUSE3)); u32 bank = __ffs(mslice_mask) * 8; @@ -418,16 +466,24 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt) gt->steering[SQIDI_PSMI].instance_target = select & 0x1; } +static void init_steering_gam1(struct xe_gt *gt) +{ + gt->steering[GAM1].group_target = 1; + gt->steering[GAM1].instance_target = 0; +} + static const struct { const char *name; void (*init)(struct xe_gt *gt); } xe_steering_types[] = { [L3BANK] = { "L3BANK", init_steering_l3bank }, + [NODE] = { "NODE", NULL }, /* initialized by l3bank init */ [MSLICE] = { "MSLICE", init_steering_mslice }, [LNCF] = { "LNCF", NULL }, /* initialized by mslice init */ - [DSS] = { "DSS", init_steering_dss }, + [DSS] = { "DSS / XeCore", init_steering_dss }, [OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm }, [SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi }, + [GAM1] = { "GAMWKRS / STLB / GAMREQSTRM", init_steering_gam1 }, [INSTANCE0] = { "INSTANCE 0", NULL }, [IMPLICIT_STEERING] = { "IMPLICIT", NULL }, }; @@ -466,7 +522,19 @@ void xe_gt_mcr_init_early(struct xe_gt *gt) gt->steering[OADDRM].ranges = xelpmp_oaddrm_steering_table; } } else { - if (GRAPHICS_VER(xe) >= 20) { + if (GRAPHICS_VERx100(xe) == 3511) { + /* + * TODO: there are some ranges in bspec with missing + * termination: [0x00B000, 0x00B0FF] and + * [0x00D880, 0x00D8FF] (NODE); [0x00B100, 0x00B3FF] + * (L3BANK). Update them here once bspec is updated. 
+ */ + gt->steering[DSS].ranges = xe3p_xpc_xecore_steering_table; + gt->steering[GAM1].ranges = xe3p_xpc_gam_grp1_steering_table; + gt->steering[INSTANCE0].ranges = xe3p_xpc_instance0_steering_table; + gt->steering[L3BANK].ranges = xelpg_l3bank_steering_table; + gt->steering[NODE].ranges = xe3p_xpc_node_steering_table; + } else if (GRAPHICS_VER(xe) >= 20) { gt->steering[DSS].ranges = xe2lpg_dss_steering_table; gt->steering[SQIDI_PSMI].ranges = xe2lpg_sqidi_psmi_steering_table; gt->steering[INSTANCE0].ranges = xe2lpg_instance0_steering_table; diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c deleted file mode 100644 index a054d6010ae0..000000000000 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ /dev/null @@ -1,679 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2022 Intel Corporation - */ - -#include "xe_gt_pagefault.h" - -#include <linux/bitfield.h> -#include <linux/circ_buf.h> - -#include <drm/drm_exec.h> -#include <drm/drm_managed.h> - -#include "abi/guc_actions_abi.h" -#include "xe_bo.h" -#include "xe_gt.h" -#include "xe_gt_printk.h" -#include "xe_gt_stats.h" -#include "xe_guc.h" -#include "xe_guc_ct.h" -#include "xe_migrate.h" -#include "xe_svm.h" -#include "xe_trace_bo.h" -#include "xe_vm.h" -#include "xe_vram_types.h" - -struct pagefault { - u64 page_addr; - u32 asid; - u16 pdata; - u8 vfid; - u8 access_type; - u8 fault_type; - u8 fault_level; - u8 engine_class; - u8 engine_instance; - u8 fault_unsuccessful; - bool trva_fault; -}; - -enum access_type { - ACCESS_TYPE_READ = 0, - ACCESS_TYPE_WRITE = 1, - ACCESS_TYPE_ATOMIC = 2, - ACCESS_TYPE_RESERVED = 3, -}; - -enum fault_type { - NOT_PRESENT = 0, - WRITE_ACCESS_VIOLATION = 1, - ATOMIC_ACCESS_VIOLATION = 2, -}; - -struct acc { - u64 va_range_base; - u32 asid; - u32 sub_granularity; - u8 granularity; - u8 vfid; - u8 access_type; - u8 engine_class; - u8 engine_instance; -}; - -static bool access_is_atomic(enum access_type access_type) -{ - return access_type == ACCESS_TYPE_ATOMIC; -} - -static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma) -{ - return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present, - vma->tile_invalidated); -} - -static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma, - bool need_vram_move, struct xe_vram_region *vram) -{ - struct xe_bo *bo = xe_vma_bo(vma); - struct xe_vm *vm = xe_vma_vm(vma); - int err; - - err = xe_vm_lock_vma(exec, vma); - if (err) - return err; - - if (!bo) - return 0; - - return need_vram_move ? xe_bo_migrate(bo, vram->placement, NULL, exec) : - xe_bo_validate(bo, vm, true, exec); -} - -static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma, - bool atomic) -{ - struct xe_vm *vm = xe_vma_vm(vma); - struct xe_tile *tile = gt_to_tile(gt); - struct xe_validation_ctx ctx; - struct drm_exec exec; - struct dma_fence *fence; - int err, needs_vram; - - lockdep_assert_held_write(&vm->lock); - - needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic); - if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma))) - return needs_vram < 0 ? 
needs_vram : -EACCES; - - xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1); - xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024); - - trace_xe_vma_pagefault(vma); - - /* Check if VMA is valid, opportunistic check only */ - if (vma_is_valid(tile, vma) && !atomic) - return 0; - -retry_userptr: - if (xe_vma_is_userptr(vma) && - xe_vma_userptr_check_repin(to_userptr_vma(vma))) { - struct xe_userptr_vma *uvma = to_userptr_vma(vma); - - err = xe_vma_userptr_pin_pages(uvma); - if (err) - return err; - } - - /* Lock VM and BOs dma-resv */ - xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {}); - drm_exec_until_all_locked(&exec) { - err = xe_pf_begin(&exec, vma, needs_vram == 1, tile->mem.vram); - drm_exec_retry_on_contention(&exec); - xe_validation_retry_on_oom(&ctx, &err); - if (err) - goto unlock_dma_resv; - - /* Bind VMA only to the GT that has faulted */ - trace_xe_vma_pf_bind(vma); - xe_vm_set_validation_exec(vm, &exec); - fence = xe_vma_rebind(vm, vma, BIT(tile->id)); - xe_vm_set_validation_exec(vm, NULL); - if (IS_ERR(fence)) { - err = PTR_ERR(fence); - xe_validation_retry_on_oom(&ctx, &err); - goto unlock_dma_resv; - } - } - - dma_fence_wait(fence, false); - dma_fence_put(fence); - -unlock_dma_resv: - xe_validation_ctx_fini(&ctx); - if (err == -EAGAIN) - goto retry_userptr; - - return err; -} - -static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid) -{ - struct xe_vm *vm; - - down_read(&xe->usm.lock); - vm = xa_load(&xe->usm.asid_to_vm, asid); - if (vm && xe_vm_in_fault_mode(vm)) - xe_vm_get(vm); - else - vm = ERR_PTR(-EINVAL); - up_read(&xe->usm.lock); - - return vm; -} - -static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf) -{ - struct xe_device *xe = gt_to_xe(gt); - struct xe_vm *vm; - struct xe_vma *vma = NULL; - int err; - bool atomic; - - /* SW isn't expected to handle TRTT faults */ - if (pf->trva_fault) - return -EFAULT; - - vm = asid_to_vm(xe, pf->asid); - if (IS_ERR(vm)) - return PTR_ERR(vm); - - /* - * TODO: Change to read lock? Using write lock for simplicity. 
- */ - down_write(&vm->lock); - - if (xe_vm_is_closed(vm)) { - err = -ENOENT; - goto unlock_vm; - } - - vma = xe_vm_find_vma_by_addr(vm, pf->page_addr); - if (!vma) { - err = -EINVAL; - goto unlock_vm; - } - - atomic = access_is_atomic(pf->access_type); - - if (xe_vma_is_cpu_addr_mirror(vma)) - err = xe_svm_handle_pagefault(vm, vma, gt, - pf->page_addr, atomic); - else - err = handle_vma_pagefault(gt, vma, atomic); - -unlock_vm: - if (!err) - vm->usm.last_fault_vma = vma; - up_write(&vm->lock); - xe_vm_put(vm); - - return err; -} - -static int send_pagefault_reply(struct xe_guc *guc, - struct xe_guc_pagefault_reply *reply) -{ - u32 action[] = { - XE_GUC_ACTION_PAGE_FAULT_RES_DESC, - reply->dw0, - reply->dw1, - }; - - return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0); -} - -static void print_pagefault(struct xe_gt *gt, struct pagefault *pf) -{ - xe_gt_dbg(gt, "\n\tASID: %d\n" - "\tVFID: %d\n" - "\tPDATA: 0x%04x\n" - "\tFaulted Address: 0x%08x%08x\n" - "\tFaultType: %d\n" - "\tAccessType: %d\n" - "\tFaultLevel: %d\n" - "\tEngineClass: %d %s\n" - "\tEngineInstance: %d\n", - pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr), - lower_32_bits(pf->page_addr), - pf->fault_type, pf->access_type, pf->fault_level, - pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class), - pf->engine_instance); -} - -#define PF_MSG_LEN_DW 4 - -static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf) -{ - const struct xe_guc_pagefault_desc *desc; - bool ret = false; - - spin_lock_irq(&pf_queue->lock); - if (pf_queue->tail != pf_queue->head) { - desc = (const struct xe_guc_pagefault_desc *) - (pf_queue->data + pf_queue->tail); - - pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0); - pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0); - pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0); - pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0); - pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) << - PFD_PDATA_HI_SHIFT; - pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0); - pf->asid = FIELD_GET(PFD_ASID, desc->dw1); - pf->vfid = FIELD_GET(PFD_VFID, desc->dw2); - pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2); - pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2); - pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) << - PFD_VIRTUAL_ADDR_HI_SHIFT; - pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) << - PFD_VIRTUAL_ADDR_LO_SHIFT; - - pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) % - pf_queue->num_dw; - ret = true; - } - spin_unlock_irq(&pf_queue->lock); - - return ret; -} - -static bool pf_queue_full(struct pf_queue *pf_queue) -{ - lockdep_assert_held(&pf_queue->lock); - - return CIRC_SPACE(pf_queue->head, pf_queue->tail, - pf_queue->num_dw) <= - PF_MSG_LEN_DW; -} - -int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) -{ - struct xe_gt *gt = guc_to_gt(guc); - struct pf_queue *pf_queue; - unsigned long flags; - u32 asid; - bool full; - - if (unlikely(len != PF_MSG_LEN_DW)) - return -EPROTO; - - asid = FIELD_GET(PFD_ASID, msg[1]); - pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE); - - /* - * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0 - */ - xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW)); - - spin_lock_irqsave(&pf_queue->lock, flags); - full = pf_queue_full(pf_queue); - if (!full) { - memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32)); - pf_queue->head = (pf_queue->head + len) % - pf_queue->num_dw; - queue_work(gt->usm.pf_wq, &pf_queue->worker); - } 
else { - xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n"); - } - spin_unlock_irqrestore(&pf_queue->lock, flags); - - return full ? -ENOSPC : 0; -} - -#define USM_QUEUE_MAX_RUNTIME_MS 20 - -static void pf_queue_work_func(struct work_struct *w) -{ - struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker); - struct xe_gt *gt = pf_queue->gt; - struct xe_guc_pagefault_reply reply = {}; - struct pagefault pf = {}; - unsigned long threshold; - int ret; - - threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS); - - while (get_pagefault(pf_queue, &pf)) { - ret = handle_pagefault(gt, &pf); - if (unlikely(ret)) { - print_pagefault(gt, &pf); - pf.fault_unsuccessful = 1; - xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret)); - } - - reply.dw0 = FIELD_PREP(PFR_VALID, 1) | - FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) | - FIELD_PREP(PFR_REPLY, PFR_ACCESS) | - FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) | - FIELD_PREP(PFR_ASID, pf.asid); - - reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) | - FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) | - FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) | - FIELD_PREP(PFR_PDATA, pf.pdata); - - send_pagefault_reply(>->uc.guc, &reply); - - if (time_after(jiffies, threshold) && - pf_queue->tail != pf_queue->head) { - queue_work(gt->usm.pf_wq, w); - break; - } - } -} - -static void acc_queue_work_func(struct work_struct *w); - -static void pagefault_fini(void *arg) -{ - struct xe_gt *gt = arg; - struct xe_device *xe = gt_to_xe(gt); - - if (!xe->info.has_usm) - return; - - destroy_workqueue(gt->usm.acc_wq); - destroy_workqueue(gt->usm.pf_wq); -} - -static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) -{ - struct xe_device *xe = gt_to_xe(gt); - xe_dss_mask_t all_dss; - int num_dss, num_eus; - - bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask, - XE_MAX_DSS_FUSE_BITS); - - num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS); - num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss, - XE_MAX_EU_FUSE_BITS) * num_dss; - - /* - * user can issue separate page faults per EU and per CS - * - * XXX: Multiplier required as compute UMD are getting PF queue errors - * without it. Follow on why this multiplier is required. 
- */ -#define PF_MULTIPLIER 8 - pf_queue->num_dw = - (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; - pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw); -#undef PF_MULTIPLIER - - pf_queue->gt = gt; - pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw, - sizeof(u32), GFP_KERNEL); - if (!pf_queue->data) - return -ENOMEM; - - spin_lock_init(&pf_queue->lock); - INIT_WORK(&pf_queue->worker, pf_queue_work_func); - - return 0; -} - -int xe_gt_pagefault_init(struct xe_gt *gt) -{ - struct xe_device *xe = gt_to_xe(gt); - int i, ret = 0; - - if (!xe->info.has_usm) - return 0; - - for (i = 0; i < NUM_PF_QUEUE; ++i) { - ret = xe_alloc_pf_queue(gt, >->usm.pf_queue[i]); - if (ret) - return ret; - } - for (i = 0; i < NUM_ACC_QUEUE; ++i) { - gt->usm.acc_queue[i].gt = gt; - spin_lock_init(>->usm.acc_queue[i].lock); - INIT_WORK(>->usm.acc_queue[i].worker, acc_queue_work_func); - } - - gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue", - WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE); - if (!gt->usm.pf_wq) - return -ENOMEM; - - gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue", - WQ_UNBOUND | WQ_HIGHPRI, - NUM_ACC_QUEUE); - if (!gt->usm.acc_wq) { - destroy_workqueue(gt->usm.pf_wq); - return -ENOMEM; - } - - return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt); -} - -void xe_gt_pagefault_reset(struct xe_gt *gt) -{ - struct xe_device *xe = gt_to_xe(gt); - int i; - - if (!xe->info.has_usm) - return; - - for (i = 0; i < NUM_PF_QUEUE; ++i) { - spin_lock_irq(>->usm.pf_queue[i].lock); - gt->usm.pf_queue[i].head = 0; - gt->usm.pf_queue[i].tail = 0; - spin_unlock_irq(>->usm.pf_queue[i].lock); - } - - for (i = 0; i < NUM_ACC_QUEUE; ++i) { - spin_lock(>->usm.acc_queue[i].lock); - gt->usm.acc_queue[i].head = 0; - gt->usm.acc_queue[i].tail = 0; - spin_unlock(>->usm.acc_queue[i].lock); - } -} - -static int granularity_in_byte(int val) -{ - switch (val) { - case 0: - return SZ_128K; - case 1: - return SZ_2M; - case 2: - return SZ_16M; - case 3: - return SZ_64M; - default: - return 0; - } -} - -static int sub_granularity_in_byte(int val) -{ - return (granularity_in_byte(val) / 32); -} - -static void print_acc(struct xe_gt *gt, struct acc *acc) -{ - xe_gt_warn(gt, "Access counter request:\n" - "\tType: %s\n" - "\tASID: %d\n" - "\tVFID: %d\n" - "\tEngine: %d:%d\n" - "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n" - "\tSub_Granularity Vector: 0x%08x\n" - "\tVA Range base: 0x%016llx\n", - acc->access_type ? 
"AC_NTFY_VAL" : "AC_TRIG_VAL", - acc->asid, acc->vfid, acc->engine_class, acc->engine_instance, - granularity_in_byte(acc->granularity) / SZ_1K, - sub_granularity_in_byte(acc->granularity) / SZ_1K, - acc->sub_granularity, acc->va_range_base); -} - -static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc) -{ - u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) * - sub_granularity_in_byte(acc->granularity); - - return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K); -} - -static int handle_acc(struct xe_gt *gt, struct acc *acc) -{ - struct xe_device *xe = gt_to_xe(gt); - struct xe_tile *tile = gt_to_tile(gt); - struct xe_validation_ctx ctx; - struct drm_exec exec; - struct xe_vm *vm; - struct xe_vma *vma; - int ret = 0; - - /* We only support ACC_TRIGGER at the moment */ - if (acc->access_type != ACC_TRIGGER) - return -EINVAL; - - vm = asid_to_vm(xe, acc->asid); - if (IS_ERR(vm)) - return PTR_ERR(vm); - - down_read(&vm->lock); - - /* Lookup VMA */ - vma = get_acc_vma(vm, acc); - if (!vma) { - ret = -EINVAL; - goto unlock_vm; - } - - trace_xe_vma_acc(vma); - - /* Userptr or null can't be migrated, nothing to do */ - if (xe_vma_has_no_bo(vma)) - goto unlock_vm; - - /* Lock VM and BOs dma-resv */ - xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {}); - drm_exec_until_all_locked(&exec) { - ret = xe_pf_begin(&exec, vma, IS_DGFX(vm->xe), tile->mem.vram); - drm_exec_retry_on_contention(&exec); - xe_validation_retry_on_oom(&ctx, &ret); - } - - xe_validation_ctx_fini(&ctx); -unlock_vm: - up_read(&vm->lock); - xe_vm_put(vm); - - return ret; -} - -#define make_u64(hi__, low__) ((u64)(hi__) << 32 | (u64)(low__)) - -#define ACC_MSG_LEN_DW 4 - -static bool get_acc(struct acc_queue *acc_queue, struct acc *acc) -{ - const struct xe_guc_acc_desc *desc; - bool ret = false; - - spin_lock(&acc_queue->lock); - if (acc_queue->tail != acc_queue->head) { - desc = (const struct xe_guc_acc_desc *) - (acc_queue->data + acc_queue->tail); - - acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2); - acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 | - FIELD_GET(ACC_SUBG_LO, desc->dw0); - acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1); - acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1); - acc->asid = FIELD_GET(ACC_ASID, desc->dw1); - acc->vfid = FIELD_GET(ACC_VFID, desc->dw2); - acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0); - acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI, - desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO); - - acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) % - ACC_QUEUE_NUM_DW; - ret = true; - } - spin_unlock(&acc_queue->lock); - - return ret; -} - -static void acc_queue_work_func(struct work_struct *w) -{ - struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker); - struct xe_gt *gt = acc_queue->gt; - struct acc acc = {}; - unsigned long threshold; - int ret; - - threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS); - - while (get_acc(acc_queue, &acc)) { - ret = handle_acc(gt, &acc); - if (unlikely(ret)) { - print_acc(gt, &acc); - xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret)); - } - - if (time_after(jiffies, threshold) && - acc_queue->tail != acc_queue->head) { - queue_work(gt->usm.acc_wq, w); - break; - } - } -} - -static bool acc_queue_full(struct acc_queue *acc_queue) -{ - lockdep_assert_held(&acc_queue->lock); - - return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <= - ACC_MSG_LEN_DW; -} - -int 
xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len) -{ - struct xe_gt *gt = guc_to_gt(guc); - struct acc_queue *acc_queue; - u32 asid; - bool full; - - /* - * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0 - */ - BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW); - - if (unlikely(len != ACC_MSG_LEN_DW)) - return -EPROTO; - - asid = FIELD_GET(ACC_ASID, msg[1]); - acc_queue = >->usm.acc_queue[asid % NUM_ACC_QUEUE]; - - spin_lock(&acc_queue->lock); - full = acc_queue_full(acc_queue); - if (!full) { - memcpy(acc_queue->data + acc_queue->head, msg, - len * sizeof(u32)); - acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW; - queue_work(gt->usm.acc_wq, &acc_queue->worker); - } else { - xe_gt_warn(gt, "ACC Queue full, dropping ACC\n"); - } - spin_unlock(&acc_queue->lock); - - return full ? -ENOSPC : 0; -} diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.h b/drivers/gpu/drm/xe/xe_gt_pagefault.h deleted file mode 100644 index 839c065a5e4c..000000000000 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2022 Intel Corporation - */ - -#ifndef _XE_GT_PAGEFAULT_H_ -#define _XE_GT_PAGEFAULT_H_ - -#include <linux/types.h> - -struct xe_gt; -struct xe_guc; - -int xe_gt_pagefault_init(struct xe_gt *gt); -void xe_gt_pagefault_reset(struct xe_gt *gt); -int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len); -int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len); - -#endif /* _XE_GT_PAGEFAULT_ */ diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index c4dda87b47cc..0714c758b9c1 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -158,39 +158,19 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) xe_gt_sriov_pf_service_update(gt); } -static u32 pf_get_vf_regs_stride(struct xe_device *xe) -{ - return GRAPHICS_VERx100(xe) > 1200 ? 
0x400 : 0x1000; -} - -static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride) -{ - struct xe_reg pf_reg = vf_reg; - - pf_reg.vf = 0; - pf_reg.addr += stride * vfid; - - return pf_reg; -} - static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid) { - u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt)); - struct xe_reg scratch; - int n, count; + struct xe_mmio mmio; + int n; + + xe_mmio_init_vf_view(&mmio, >->mmio, vfid); if (xe_gt_is_media_type(gt)) { - count = MED_VF_SW_FLAG_COUNT; - for (n = 0; n < count; n++) { - scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride); - xe_mmio_write32(>->mmio, scratch, 0); - } + for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++) + xe_mmio_write32(&mmio, MED_VF_SW_FLAG(n), 0); } else { - count = VF_SW_FLAG_COUNT; - for (n = 0; n < count; n++) { - scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride); - xe_mmio_write32(>->mmio, scratch, 0); - } + for (n = 0; n < VF_SW_FLAG_COUNT; n++) + xe_mmio_write32(&mmio, VF_SW_FLAG(n), 0); } } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 6344b5205c08..62f6cc45a764 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -9,6 +9,7 @@ #include "abi/guc_actions_sriov_abi.h" #include "abi/guc_klvs_abi.h" +#include "regs/xe_gtt_defs.h" #include "regs/xe_guc_regs.h" #include "xe_bo.h" @@ -697,6 +698,22 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs) return fair; } +static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs) +{ + bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt)); + u64 shareable = ALIGN_DOWN(GUC_GGTT_TOP, SZ_512M); + u64 alignment = pf_get_ggtt_alignment(gt); + + if (admin_only_pf && num_vfs == 1) + return ALIGN_DOWN(shareable, alignment); + + /* need to hardcode due to ~512M of GGTT being reserved */ + if (num_vfs > 56) + return SZ_64M - SZ_8M; + + return rounddown_pow_of_two(shareable / num_vfs); +} + /** * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT. * @gt: the &xe_gt (can't be media) @@ -710,6 +727,7 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs) int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs) { + u64 profile = pf_profile_fair_ggtt(gt, num_vfs); u64 fair; xe_gt_assert(gt, vfid); @@ -723,9 +741,71 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid, if (!fair) return -ENOSPC; + fair = min(fair, profile); + if (fair < profile) + xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %llu vs %llu)\n", + "GGTT", fair, profile); + return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair); } +/** + * xe_gt_sriov_pf_config_ggtt_save() - Save a VF provisioned GGTT data into a buffer. + * @gt: the &xe_gt + * @vfid: VF identifier (can't be 0) + * @buf: the GGTT data destination buffer (or NULL to query the buf size) + * @size: the size of the buffer (or 0 to query the buf size) + * + * This function can only be called on PF. + * + * Return: size of the buffer needed to save GGTT data if querying, + * 0 on successful save or a negative error code on failure. 
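+ *
+ * A hedged usage sketch of the query-then-save calling pattern (buffer
+ * management and error handling are illustrative only)::
+ *
+ *	ssize_t size = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0);
+ *	void *buf = size > 0 ? kvmalloc(size, GFP_KERNEL) : NULL;
+ *	int err = buf ? xe_gt_sriov_pf_config_ggtt_save(gt, vfid, buf, size) : -ENOMEM;
+ *
+ * where a zero err means buf now holds the VF GGTT data that can later be
+ * passed to xe_gt_sriov_pf_config_ggtt_restore().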
+ */ +ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid, + void *buf, size_t size) +{ + struct xe_ggtt_node *node; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid); + xe_gt_assert(gt, !(!buf ^ !size)); + + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); + + node = pf_pick_vf_config(gt, vfid)->ggtt_region; + + if (!buf) + return xe_ggtt_node_pt_size(node); + + return xe_ggtt_node_save(node, buf, size, vfid); +} + +/** + * xe_gt_sriov_pf_config_ggtt_restore() - Restore a VF provisioned GGTT data from a buffer. + * @gt: the &xe_gt + * @vfid: VF identifier (can't be 0) + * @buf: the GGTT data source buffer + * @size: the size of the buffer + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size) +{ + struct xe_ggtt_node *node; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid); + + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); + + node = pf_pick_vf_config(gt, vfid)->ggtt_region; + + return xe_ggtt_node_load(node, buf, size, vfid); +} + static u32 pf_get_min_spare_ctxs(struct xe_gt *gt) { /* XXX: preliminary */ @@ -924,7 +1004,8 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns const char *what, const char *(*unit)(u32), unsigned int last, int err) { - xe_gt_assert(gt, first); + char name[8]; + xe_gt_assert(gt, num_vfs); xe_gt_assert(gt, first <= last); @@ -932,8 +1013,9 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err); if (unlikely(err)) { - xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n", - first, first + num_vfs - 1, what); + xe_gt_sriov_notice(gt, "Failed to bulk provision %s..VF%u with %s\n", + xe_sriov_function_name(first, name, sizeof(name)), + first + num_vfs - 1, what); if (last > first) pf_config_bulk_set_u32_done(gt, first, last - first, value, get, what, unit, last, 0); @@ -942,8 +1024,9 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */ value = get(gt, first); - xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n", - first, first + num_vfs - 1, value, unit(value), what); + xe_gt_sriov_info(gt, "%s..VF%u provisioned with %u%s %s\n", + xe_sriov_function_name(first, name, sizeof(name)), + first + num_vfs - 1, value, unit(value), what); return 0; } @@ -982,6 +1065,16 @@ int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid, "GuC context IDs", no_unit, n, err); } +static u32 pf_profile_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs) +{ + bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt)); + + if (admin_only_pf && num_vfs == 1) + return ALIGN_DOWN(GUC_ID_MAX, SZ_1K); + + return rounddown_pow_of_two(GUC_ID_MAX / num_vfs); +} + static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs) { struct xe_guc_id_mgr *idm = >->uc.guc.submission_state.idm; @@ -1014,6 +1107,7 @@ static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs) int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs) { + u32 profile = pf_profile_fair_ctxs(gt, num_vfs); u32 fair; xe_gt_assert(gt, vfid); @@ -1026,6 +1120,11 @@ int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, 
unsigned int vfid, if (!fair) return -ENOSPC; + fair = min(fair, profile); + if (fair < profile) + xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n", + "GuC context IDs", fair, profile); + return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair); } @@ -1230,6 +1329,17 @@ int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid, "GuC doorbell IDs", no_unit, n, err); } +static u32 pf_profile_fair_dbs(struct xe_gt *gt, unsigned int num_vfs) +{ + bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt)); + + /* XXX: preliminary */ + if (admin_only_pf && num_vfs == 1) + return GUC_NUM_DOORBELLS - SZ_16; + + return rounddown_pow_of_two(GUC_NUM_DOORBELLS / (num_vfs + 1)); +} + static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs) { struct xe_guc_db_mgr *dbm = >->uc.guc.dbm; @@ -1262,6 +1372,7 @@ static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs) int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs) { + u32 profile = pf_profile_fair_dbs(gt, num_vfs); u32 fair; xe_gt_assert(gt, vfid); @@ -1274,6 +1385,11 @@ int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid, if (!fair) return -ENOSPC; + fair = min(fair, profile); + if (fair < profile) + xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n", + "GuC doorbell IDs", fair, profile); + return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair); } @@ -1484,7 +1600,8 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_NEEDS_2M | XE_BO_FLAG_PINNED | - XE_BO_FLAG_PINNED_LATE_RESTORE); + XE_BO_FLAG_PINNED_LATE_RESTORE | + XE_BO_FLAG_FORCE_USER_VRAM); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -1547,7 +1664,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size { int err; - xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt))); + if (!xe_device_has_lmtt(gt_to_xe(gt))) + return -EPERM; mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); if (vfid) @@ -1597,6 +1715,32 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, "LMEM", n, err); } +static struct xe_bo *pf_get_vf_config_lmem_obj(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); + + return config->lmem_obj; +} + +/** + * xe_gt_sriov_pf_config_get_lmem_obj() - Take a reference to the struct &xe_bo backing VF LMEM. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function can only be called on PF. + * The caller is responsible for calling xe_bo_put() on the returned object. + * + * Return: pointer to struct &xe_bo backing VF LMEM (if any). 
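+ *
+ * A minimal, purely illustrative usage sketch::
+ *
+ *	struct xe_bo *bo = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid);
+ *
+ *	if (bo) {
+ *		use_vf_lmem_backing_object(bo);
+ *		xe_bo_put(bo);
+ *	}
+ *
+ * where use_vf_lmem_backing_object() is only a placeholder for whatever the
+ * caller needs to do with the object before dropping the reference.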
+ */ +struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, vfid); + + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); + + return xe_bo_get(pf_get_vf_config_lmem_obj(gt, vfid)); +} + static u64 pf_query_free_lmem(struct xe_gt *gt) { struct xe_tile *tile = gt->tile; @@ -1722,7 +1866,7 @@ static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid, return 0; } -static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid) +static u32 pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid) { struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); @@ -1730,47 +1874,107 @@ static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid) } /** - * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF. + * xe_gt_sriov_pf_config_set_exec_quantum_locked() - Configure PF/VF execution quantum. * @gt: the &xe_gt - * @vfid: the VF identifier + * @vfid: the PF or VF identifier * @exec_quantum: requested execution quantum in milliseconds (0 is infinity) * - * This function can only be called on PF. + * This function can only be called on PF with the master mutex hold. + * It will log the provisioned value or an error in case of the failure. * * Return: 0 on success or a negative error code on failure. */ -int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, - u32 exec_quantum) +int xe_gt_sriov_pf_config_set_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid, + u32 exec_quantum) { int err; - mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); + err = pf_provision_exec_quantum(gt, vfid, exec_quantum); - mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); return pf_config_set_u32_done(gt, vfid, exec_quantum, - xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid), + pf_get_exec_quantum(gt, vfid), "execution quantum", exec_quantum_unit, err); } /** - * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum. + * xe_gt_sriov_pf_config_set_exec_quantum() - Configure PF/VF execution quantum. * @gt: the &xe_gt - * @vfid: the VF identifier + * @vfid: the PF or VF identifier + * @exec_quantum: requested execution quantum in milliseconds (0 is infinity) * * This function can only be called on PF. + * It will log the provisioned value or an error in case of the failure. * - * Return: VF's (or PF's) execution quantum in milliseconds. + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, + u32 exec_quantum) +{ + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); + + return xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, exec_quantum); +} + +/** + * xe_gt_sriov_pf_config_get_exec_quantum_locked() - Get PF/VF execution quantum. + * @gt: the &xe_gt + * @vfid: the PF or VF identifier + * + * This function can only be called on PF with the master mutex hold. + * + * Return: execution quantum in milliseconds (or 0 if infinity). + */ +u32 xe_gt_sriov_pf_config_get_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid) +{ + lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); + + return pf_get_exec_quantum(gt, vfid); +} + +/** + * xe_gt_sriov_pf_config_get_exec_quantum() - Get PF/VF execution quantum. + * @gt: the &xe_gt + * @vfid: the PF or VF identifier + * + * This function can only be called on PF. + * + * Return: execution quantum in milliseconds (or 0 if infinity). 
*/ u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid) { - u32 exec_quantum; + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); - mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); - exec_quantum = pf_get_exec_quantum(gt, vfid); - mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + return pf_get_exec_quantum(gt, vfid); +} + +/** + * xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked() - Configure EQ for PF and VFs. + * @gt: the &xe_gt to configure + * @exec_quantum: requested execution quantum in milliseconds (0 is infinity) + * + * This function can only be called on PF with the master mutex hold. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(struct xe_gt *gt, u32 exec_quantum) +{ + unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt); + unsigned int n; + int err = 0; - return exec_quantum; + lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); + + for (n = 0; n <= totalvfs; n++) { + err = pf_provision_exec_quantum(gt, VFID(n), exec_quantum); + if (err) + break; + } + + return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, exec_quantum, + pf_get_exec_quantum, "execution quantum", + exec_quantum_unit, n, err); } static const char *preempt_timeout_unit(u32 preempt_timeout) @@ -1793,7 +1997,7 @@ static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid, return 0; } -static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid) +static u32 pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid) { struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); @@ -1801,47 +2005,106 @@ static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid) } /** - * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF. + * xe_gt_sriov_pf_config_set_preempt_timeout_locked() - Configure PF/VF preemption timeout. * @gt: the &xe_gt - * @vfid: the VF identifier + * @vfid: the PF or VF identifier * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity) * - * This function can only be called on PF. + * This function can only be called on PF with the master mutex hold. + * It will log the provisioned value or an error in case of the failure. * * Return: 0 on success or a negative error code on failure. */ -int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid, - u32 preempt_timeout) +int xe_gt_sriov_pf_config_set_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid, + u32 preempt_timeout) { int err; - mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); + err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout); - mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); return pf_config_set_u32_done(gt, vfid, preempt_timeout, - xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid), + pf_get_preempt_timeout(gt, vfid), "preemption timeout", preempt_timeout_unit, err); } /** - * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout. + * xe_gt_sriov_pf_config_set_preempt_timeout() - Configure PF/VF preemption timeout. * @gt: the &xe_gt - * @vfid: the VF identifier + * @vfid: the PF or VF identifier + * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity) + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid, + u32 preempt_timeout) +{ + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); + + return xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, preempt_timeout); +} + +/** + * xe_gt_sriov_pf_config_get_preempt_timeout_locked() - Get PF/VF preemption timeout. + * @gt: the &xe_gt + * @vfid: the PF or VF identifier + * + * This function can only be called on PF with the master mutex hold. + * + * Return: preemption timeout in microseconds (or 0 if infinity). + */ +u32 xe_gt_sriov_pf_config_get_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid) +{ + lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); + + return pf_get_preempt_timeout(gt, vfid); +} + +/** + * xe_gt_sriov_pf_config_get_preempt_timeout() - Get PF/VF preemption timeout. + * @gt: the &xe_gt + * @vfid: the PF or VF identifier * * This function can only be called on PF. * - * Return: VF's (or PF's) preemption timeout in microseconds. + * Return: preemption timeout in microseconds (or 0 if infinity). */ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid) { - u32 preempt_timeout; + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); - mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); - preempt_timeout = pf_get_preempt_timeout(gt, vfid); - mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + return pf_get_preempt_timeout(gt, vfid); +} - return preempt_timeout; +/** + * xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked() - Configure PT for PF and VFs. + * @gt: the &xe_gt to configure + * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity) + * + * This function can only be called on PF with the master mutex hold. + * + * Return: 0 on success or a negative error code on failure. 
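+ *
+ * A hedged usage sketch, mirroring how the non-bulk wrappers take the lock
+ * (the 64000 us value is just an example)::
+ *
+ *	u32 pt;
+ *	int err;
+ *
+ *	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+ *	err = xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(gt, 64000);
+ *	if (!err)
+ *		pt = xe_gt_sriov_pf_config_get_preempt_timeout_locked(gt, VFID(1));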
+ */ +int xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(struct xe_gt *gt, u32 preempt_timeout) +{ + unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt); + unsigned int n; + int err = 0; + + lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); + + for (n = 0; n <= totalvfs; n++) { + err = pf_provision_preempt_timeout(gt, VFID(n), preempt_timeout); + if (err) + break; + } + + return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, preempt_timeout, + pf_get_preempt_timeout, "preemption timeout", + preempt_timeout_unit, n, err); } static const char *sched_priority_unit(u32 priority) @@ -2669,3 +2932,7 @@ int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_prin return 0; } + +#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST) +#include "tests/xe_gt_sriov_pf_config_kunit.c" +#endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h index 513e6512a575..4975730423d7 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h @@ -36,14 +36,25 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs); int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs, u64 size); +struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid); u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum); +u32 xe_gt_sriov_pf_config_get_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_config_set_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid, + u32 exec_quantum); +int xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(struct xe_gt *gt, u32 exec_quantum); + u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 preempt_timeout); +u32 xe_gt_sriov_pf_config_get_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_config_set_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid, + u32 preempt_timeout); +int xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(struct xe_gt *gt, u32 preempt_timeout); + u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority); @@ -61,6 +72,11 @@ ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *bu int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, const void *buf, size_t size); +ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid, + void *buf, size_t size); +int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size); + bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_config_init(struct xe_gt *gt); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 4f7fff892bc0..bf48b05797de 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -18,6 +18,10 @@ #include "xe_gt_sriov_printk.h" #include "xe_guc_ct.h" #include "xe_sriov.h" +#include "xe_sriov_packet.h" +#include "xe_sriov_packet_types.h" 
+#include "xe_sriov_pf_control.h" +#include "xe_sriov_pf_migration.h" #include "xe_sriov_pf_service.h" #include "xe_tile.h" @@ -170,6 +174,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) CASE2STR(FLR_SEND_START); CASE2STR(FLR_WAIT_GUC); CASE2STR(FLR_GUC_DONE); + CASE2STR(FLR_SYNC); CASE2STR(FLR_RESET_CONFIG); CASE2STR(FLR_RESET_DATA); CASE2STR(FLR_RESET_MMIO); @@ -179,9 +184,20 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) CASE2STR(PAUSE_SEND_PAUSE); CASE2STR(PAUSE_WAIT_GUC); CASE2STR(PAUSE_GUC_DONE); - CASE2STR(PAUSE_SAVE_GUC); CASE2STR(PAUSE_FAILED); CASE2STR(PAUSED); + CASE2STR(SAVE_WIP); + CASE2STR(SAVE_PROCESS_DATA); + CASE2STR(SAVE_WAIT_DATA); + CASE2STR(SAVE_DATA_DONE); + CASE2STR(SAVE_FAILED); + CASE2STR(SAVED); + CASE2STR(RESTORE_WIP); + CASE2STR(RESTORE_PROCESS_DATA); + CASE2STR(RESTORE_WAIT_DATA); + CASE2STR(RESTORE_DATA_DONE); + CASE2STR(RESTORE_FAILED); + CASE2STR(RESTORED); CASE2STR(RESUME_WIP); CASE2STR(RESUME_SEND_RESUME); CASE2STR(RESUME_FAILED); @@ -206,6 +222,8 @@ static unsigned long pf_get_default_timeout(enum xe_gt_sriov_control_bits bit) case XE_GT_SRIOV_STATE_FLR_WIP: case XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: return 5 * HZ; + case XE_GT_SRIOV_STATE_RESTORE_WIP: + return 20 * HZ; default: return HZ; } @@ -223,7 +241,7 @@ static unsigned long *pf_peek_vf_state(struct xe_gt *gt, unsigned int vfid) { struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid); - return &cs->state; + return cs->state; } static bool pf_check_vf_state(struct xe_gt *gt, unsigned int vfid, @@ -271,12 +289,19 @@ static bool pf_expect_vf_not_state(struct xe_gt *gt, unsigned int vfid, return result; } +static void pf_track_vf_state(struct xe_gt *gt, unsigned int vfid, + enum xe_gt_sriov_control_bits bit, + const char *what) +{ + xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) %s\n", + vfid, control_bit_to_string(bit), bit, what); +} + static bool pf_enter_vf_state(struct xe_gt *gt, unsigned int vfid, enum xe_gt_sriov_control_bits bit) { if (!test_and_set_bit(bit, pf_peek_vf_state(gt, vfid))) { - xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) enter\n", - vfid, control_bit_to_string(bit), bit); + pf_track_vf_state(gt, vfid, bit, "enter"); return true; } return false; @@ -286,8 +311,7 @@ static bool pf_exit_vf_state(struct xe_gt *gt, unsigned int vfid, enum xe_gt_sriov_control_bits bit) { if (test_and_clear_bit(bit, pf_peek_vf_state(gt, vfid))) { - xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) exit\n", - vfid, control_bit_to_string(bit), bit); + pf_track_vf_state(gt, vfid, bit, "exit"); return true; } return false; @@ -321,6 +345,8 @@ static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED); } #define pf_enter_vf_state_machine_bug(gt, vfid) ({ \ @@ -351,6 +377,8 @@ static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid) static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid); static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid); +static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid); +static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid); static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid); static void pf_exit_vf_resume_wip(struct 
xe_gt *gt, unsigned int vfid); @@ -372,6 +400,8 @@ static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_flr_wip(gt, vfid); pf_exit_vf_stop_wip(gt, vfid); + pf_exit_vf_save_wip(gt, vfid); + pf_exit_vf_restore_wip(gt, vfid); pf_exit_vf_pause_wip(gt, vfid); pf_exit_vf_resume_wip(gt, vfid); @@ -391,6 +421,8 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED); pf_exit_vf_mismatch(gt, vfid); pf_exit_vf_wip(gt, vfid); } @@ -421,8 +453,7 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid) * : PAUSE_GUC_DONE o-----restart * : | : * : | o---<--busy : - * : v / / : - * : PAUSE_SAVE_GUC : + * : / : * : / : * : / : * :....o..............o...............o...........: @@ -442,7 +473,6 @@ static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid) pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE); - pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC); } } @@ -473,41 +503,12 @@ static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid) pf_enter_vf_pause_failed(gt, vfid); } -static void pf_enter_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid) -{ - if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC)) - pf_enter_vf_state_machine_bug(gt, vfid); -} - -static bool pf_exit_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid) -{ - int err; - - if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC)) - return false; - - err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid); - if (err) { - /* retry if busy */ - if (err == -EBUSY) { - pf_enter_vf_pause_save_guc(gt, vfid); - return true; - } - /* give up on error */ - if (err == -EIO) - pf_enter_vf_mismatch(gt, vfid); - } - - pf_enter_vf_pause_completed(gt, vfid); - return true; -} - static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid) { if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE)) return false; - pf_enter_vf_pause_save_guc(gt, vfid); + pf_enter_vf_pause_completed(gt, vfid); return true; } @@ -616,7 +617,7 @@ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid) } if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) { - xe_gt_sriov_info(gt, "VF%u paused!\n", vfid); + xe_gt_sriov_dbg(gt, "VF%u paused!\n", vfid); return 0; } @@ -667,6 +668,8 @@ static void pf_enter_vf_resumed(struct xe_gt *gt, unsigned int vfid) { pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED); pf_exit_vf_mismatch(gt, vfid); pf_exit_vf_wip(gt, vfid); } @@ -745,6 +748,16 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) return -EPERM; } + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u save is in progress!\n", vfid); + return -EBUSY; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u restore is in progress!\n", vfid); + return -EBUSY; + } + if (!pf_enter_vf_resume_wip(gt, vfid)) { xe_gt_sriov_dbg(gt, 
"VF%u resume already in progress!\n", vfid); return -EALREADY; @@ -755,7 +768,7 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) return err; if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED)) { - xe_gt_sriov_info(gt, "VF%u resumed!\n", vfid); + xe_gt_sriov_dbg(gt, "VF%u resumed!\n", vfid); return 0; } @@ -769,6 +782,562 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) } /** + * DOC: The VF SAVE state machine + * + * SAVE extends the PAUSED state. + * + * The VF SAVE state machine looks like:: + * + * ....PAUSED.................................................... + * : : + * : (O)<---------o : + * : | \ : + * : save (SAVED) (SAVE_FAILED) : + * : | ^ ^ : + * : | | | : + * : ....V...............o...........o......SAVE_WIP......... : + * : : | | | : : + * : : | empty | : : + * : : | | | : : + * : : | | | : : + * : : | DATA_DONE | : : + * : : | ^ | : : + * : : | | error : : + * : : | no_data / : : + * : : | / / : : + * : : | / / : : + * : : | / / : : + * : : o---------->PROCESS_DATA<----consume : : + * : : \ \ : : + * : : \ \ : : + * : : \ \ : : + * : : ring_full----->WAIT_DATA : : + * : : : : + * : :......................................................: : + * :............................................................: + * + * For the full state machine view, see `The VF state machine`_. + */ + +static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + xe_gt_sriov_pf_migration_ring_free(gt, vfid); + + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); + } +} + +static void pf_enter_vf_saved(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED)) + pf_enter_vf_state_machine_bug(gt, vfid); + + xe_gt_sriov_dbg(gt, "VF%u saved!\n", vfid); + + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_mismatch(gt, vfid); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_save_failed(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED)) + pf_enter_vf_state_machine_bug(gt, vfid); + + wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid)); + + pf_exit_vf_wip(gt, vfid); +} + +static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid, + XE_SRIOV_PACKET_TYPE_GUC)) { + ret = xe_gt_sriov_pf_migration_guc_save(gt, vfid); + if (ret) + return ret; + + xe_gt_sriov_pf_migration_save_data_complete(gt, vfid, + XE_SRIOV_PACKET_TYPE_GUC); + + return -EAGAIN; + } + + if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid, + XE_SRIOV_PACKET_TYPE_GGTT)) { + ret = xe_gt_sriov_pf_migration_ggtt_save(gt, vfid); + if (ret) + return ret; + + xe_gt_sriov_pf_migration_save_data_complete(gt, vfid, + XE_SRIOV_PACKET_TYPE_GGTT); + + return -EAGAIN; + } + + if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid, + XE_SRIOV_PACKET_TYPE_MMIO)) { + ret = xe_gt_sriov_pf_migration_mmio_save(gt, vfid); + if (ret) + return ret; + + xe_gt_sriov_pf_migration_save_data_complete(gt, vfid, + XE_SRIOV_PACKET_TYPE_MMIO); + + return -EAGAIN; + } + + if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid, + XE_SRIOV_PACKET_TYPE_VRAM)) { + ret = xe_gt_sriov_pf_migration_vram_save(gt, vfid); + if (ret == -EAGAIN) + return -EAGAIN; + else if 
(ret) + return ret; + + xe_gt_sriov_pf_migration_save_data_complete(gt, vfid, + XE_SRIOV_PACKET_TYPE_VRAM); + + return -EAGAIN; + } + + return 0; +} + +static bool pf_handle_vf_save(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA)) + return false; + + if (xe_gt_sriov_pf_migration_ring_full(gt, vfid)) { + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA); + return true; + } + + ret = pf_handle_vf_save_data(gt, vfid); + if (ret == -EAGAIN) + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); + else if (ret) + pf_enter_vf_save_failed(gt, vfid); + else + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); + + return true; +} + +static void pf_exit_vf_save_wait_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)) + return; + + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); + pf_queue_vf(gt, vfid); +} + +static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + xe_gt_sriov_pf_migration_save_init(gt, vfid); + pf_enter_vf_wip(gt, vfid); + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); + pf_queue_vf(gt, vfid); + return true; + } + + return false; +} + +/** + * xe_gt_sriov_pf_control_check_save_data_done() - Check if all save migration data was produced. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: true if all migration data was produced, false otherwise. + */ +bool xe_gt_sriov_pf_control_check_save_data_done(struct xe_gt *gt, unsigned int vfid) +{ + return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); +} + +/** + * xe_gt_sriov_pf_control_check_save_failed() - Check if save processing has failed. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: true if save processing failed, false otherwise. + */ +bool xe_gt_sriov_pf_control_check_save_failed(struct xe_gt *gt, unsigned int vfid) +{ + return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED); +} + +/** + * xe_gt_sriov_pf_control_process_save_data() - Queue VF save migration data processing. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_process_save_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED)) + return -EIO; + + pf_exit_vf_save_wait_data(gt, vfid); + + return 0; +} + +/** + * xe_gt_sriov_pf_control_trigger_save_vf() - Start an SR-IOV VF migration data save sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
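
The control entry points around here only drive the per-GT state machine; the code that actually drains the migration ring and hands packets to userspace lives at the device level and is not part of this hunk. A minimal sketch of how such a caller could sequence one GT through the save flow, using only the helpers declared in the xe_gt_sriov_pf_control.h hunk further down (the drain step is a placeholder, not a real API, and a real caller would sleep on the VF's migration waitqueue instead of spinning):

static int example_save_vf_on_gt(struct xe_gt *gt, unsigned int vfid)
{
	int err;

	/* VF must already be PAUSED; this enters SAVE_WIP and queues the worker */
	err = xe_gt_sriov_pf_control_trigger_save_vf(gt, vfid);
	if (err)
		return err;

	while (!xe_gt_sriov_pf_control_check_save_data_done(gt, vfid)) {
		if (xe_gt_sriov_pf_control_check_save_failed(gt, vfid))
			return -EIO;

		/* ... drain queued packets from the migration ring here ... */

		/* kick the worker out of SAVE_WAIT_DATA once there is room again */
		err = xe_gt_sriov_pf_control_process_save_data(gt, vfid);
		if (err)
			return err;
	}

	/* requires SAVE_DATA_DONE; clears SAVE_WIP and latches SAVED */
	return xe_gt_sriov_pf_control_finish_save_vf(gt, vfid);
}
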
+ */ +int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) { + xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid); + return -EPERM; + } + + if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) { + xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid); + return -EPERM; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u restore is in progress!\n", vfid); + return -EBUSY; + } + + if (!pf_enter_vf_save_wip(gt, vfid)) { + xe_gt_sriov_dbg(gt, "VF%u save already in progress!\n", vfid); + return -EALREADY; + } + + return 0; +} + +/** + * xe_gt_sriov_pf_control_finish_save_vf() - Complete a VF migration data save sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE)) { + xe_gt_sriov_err(gt, "VF%u save is still in progress!\n", vfid); + return -EIO; + } + + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); + pf_enter_vf_saved(gt, vfid); + + return 0; +} + +/** + * DOC: The VF RESTORE state machine + * + * RESTORE extends the PAUSED state. + * + * The VF RESTORE state machine looks like:: + * + * ....PAUSED.................................................... + * : : + * : (O)<---------o : + * : | \ : + * : restore (RESTORED) (RESTORE_FAILED) : + * : | ^ ^ : + * : | | | : + * : ....V...............o...........o......RESTORE_WIP...... : + * : : | | | : : + * : : | empty | : : + * : : | | | : : + * : : | | | : : + * : : | DATA_DONE | : : + * : : | ^ | : : + * : : | | error : : + * : : | trailer / : : + * : : | / / : : + * : : | / / : : + * : : | / / : : + * : : o---------->PROCESS_DATA<----produce : : + * : : \ \ : : + * : : \ \ : : + * : : \ \ : : + * : : ring_empty---->WAIT_DATA : : + * : : : : + * : :......................................................: : + * :............................................................: + * + * For the full state machine view, see `The VF state machine`_. 
+ */ + +static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + xe_gt_sriov_pf_migration_ring_free(gt, vfid); + + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE); + } +} + +static void pf_enter_vf_restored(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED)) + pf_enter_vf_state_machine_bug(gt, vfid); + + xe_gt_sriov_dbg(gt, "VF%u restored!\n", vfid); + + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_mismatch(gt, vfid); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_restore_failed(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED)) + pf_enter_vf_state_machine_bug(gt, vfid); + + wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid)); + + pf_exit_vf_wip(gt, vfid); +} + +static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_sriov_packet *data = xe_gt_sriov_pf_migration_restore_consume(gt, vfid); + int ret = 0; + + switch (data->hdr.type) { + case XE_SRIOV_PACKET_TYPE_GGTT: + ret = xe_gt_sriov_pf_migration_ggtt_restore(gt, vfid, data); + break; + case XE_SRIOV_PACKET_TYPE_MMIO: + ret = xe_gt_sriov_pf_migration_mmio_restore(gt, vfid, data); + break; + case XE_SRIOV_PACKET_TYPE_GUC: + ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data); + break; + case XE_SRIOV_PACKET_TYPE_VRAM: + ret = xe_gt_sriov_pf_migration_vram_restore(gt, vfid, data); + break; + default: + xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", + vfid, data->hdr.type); + break; + } + + xe_sriov_packet_free(data); + + return ret; +} + +static bool pf_handle_vf_restore(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA)) + return false; + + if (xe_gt_sriov_pf_migration_ring_empty(gt, vfid)) { + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE)) + pf_enter_vf_restored(gt, vfid); + else + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA); + + return true; + } + + ret = pf_handle_vf_restore_data(gt, vfid); + if (ret) + pf_enter_vf_restore_failed(gt, vfid); + else + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); + + return true; +} + +static void pf_exit_vf_restore_wait_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA)) + return; + + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); + pf_queue_vf(gt, vfid); +} + +static bool pf_enter_vf_restore_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + pf_enter_vf_wip(gt, vfid); + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); + pf_queue_vf(gt, vfid); + return true; + } + + return false; +} + +/** + * xe_gt_sriov_pf_control_check_restore_failed() - Check if restore processing has failed. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: true if restore processing failed, false otherwise. 
+ */ +bool xe_gt_sriov_pf_control_check_restore_failed(struct xe_gt *gt, unsigned int vfid) +{ + return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED); +} + +/** + * xe_gt_sriov_pf_control_restore_data_done() - Indicate the end of VF migration data stream. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE)) { + pf_enter_vf_state_machine_bug(gt, vfid); + return -EIO; + } + + return xe_gt_sriov_pf_control_process_restore_data(gt, vfid); +} + +/** + * xe_gt_sriov_pf_control_process_restore_data() - Queue VF restore migration data processing. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED)) { + xe_gt_sriov_pf_migration_ring_free(gt, vfid); + return -EIO; + } + + pf_exit_vf_restore_wait_data(gt, vfid); + + return 0; +} + +/** + * xe_gt_sriov_pf_control_trigger restore_vf() - Start an SR-IOV VF migration data restore sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) { + xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid); + return -EPERM; + } + + if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) { + xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid); + return -EPERM; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u save is in progress!\n", vfid); + return -EBUSY; + } + + if (!pf_enter_vf_restore_wip(gt, vfid)) { + xe_gt_sriov_dbg(gt, "VF%u restore already in progress!\n", vfid); + return -EALREADY; + } + + return 0; +} + +static int pf_wait_vf_restore_done(struct xe_gt *gt, unsigned int vfid) +{ + unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_RESTORE_WIP); + int err; + + err = pf_wait_vf_wip_done(gt, vfid, timeout); + if (err) { + xe_gt_sriov_notice(gt, "VF%u RESTORE didn't finish in %u ms (%pe)\n", + vfid, jiffies_to_msecs(timeout), ERR_PTR(err)); + return err; + } + + if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED)) + return -EIO; + + return 0; +} + +/** + * xe_gt_sriov_pf_control_finish_restore_vf() - Complete a VF migration data restore sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
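
On the restore side the split is the same: feeding packets into the migration ring happens elsewhere, and the helpers above only nudge the state machine. A rough sketch of a hypothetical device-level caller (the example_* names are illustrative; only the xe_gt_sriov_pf_control_*() calls come from this patch):

/* begin: VF must already be PAUSED; enters RESTORE_WIP and queues the worker */
static int example_restore_begin(struct xe_gt *gt, unsigned int vfid)
{
	return xe_gt_sriov_pf_control_trigger_restore_vf(gt, vfid);
}

/* called after each packet has been pushed into the VF's migration ring */
static int example_restore_packet_queued(struct xe_gt *gt, unsigned int vfid)
{
	/* kicks the worker out of RESTORE_WAIT_DATA so it consumes the ring */
	return xe_gt_sriov_pf_control_process_restore_data(gt, vfid);
}

/* called once the end-of-stream trailer has been read from the source */
static int example_restore_end(struct xe_gt *gt, unsigned int vfid)
{
	int err;

	/* with RESTORE_DATA_DONE set, an empty ring lets the worker enter RESTORED */
	err = xe_gt_sriov_pf_control_restore_data_done(gt, vfid);
	if (err)
		return err;

	/* waits (up to 20 s) for RESTORE_WIP to clear, then verifies RESTORED */
	return xe_gt_sriov_pf_control_finish_restore_vf(gt, vfid);
}
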
+ */ +int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + ret = pf_wait_vf_restore_done(gt, vfid); + if (ret) + return ret; + + if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED)) { + pf_enter_vf_mismatch(gt, vfid); + return -EIO; + } + + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + + return 0; +} + +/** * DOC: The VF STOP state machine * * The VF STOP state machine looks like:: @@ -809,6 +1378,8 @@ static void pf_enter_vf_stopped(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED); pf_exit_vf_mismatch(gt, vfid); pf_exit_vf_wip(gt, vfid); } @@ -896,7 +1467,7 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid) return err; if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) { - xe_gt_sriov_info(gt, "VF%u stopped!\n", vfid); + xe_gt_sriov_dbg(gt, "VF%u stopped!\n", vfid); return 0; } @@ -934,6 +1505,10 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid) * : v : | | * : FLR_GUC_DONE : | | * : | : | | + * : | o--<--sync : | | + * : |/ / : | | + * : FLR_SYNC--o : | | + * : | : | | * : FLR_RESET_CONFIG---failed--->-----------o--------+-----------o * : | : | | * : FLR_RESET_DATA : | | @@ -985,6 +1560,8 @@ static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid) pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START); + + xe_sriov_pf_control_sync_flr(gt_to_xe(gt), vfid); } } @@ -1141,12 +1718,38 @@ static bool pf_exit_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid) return true; } +static bool pf_exit_vf_flr_sync(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC)) + return false; + + pf_enter_vf_flr_reset_config(gt, vfid); + return true; +} + +static void pf_enter_vf_flr_sync(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC)) + pf_enter_vf_state_machine_bug(gt, vfid); + + ret = xe_sriov_pf_control_sync_flr(gt_to_xe(gt), vfid); + if (ret < 0) { + xe_gt_sriov_dbg_verbose(gt, "FLR checkpoint %pe\n", ERR_PTR(ret)); + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC); + } else { + xe_gt_sriov_dbg_verbose(gt, "FLR checkpoint pass\n"); + pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC); + } +} + static bool pf_exit_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid) { if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE)) return false; - pf_enter_vf_flr_reset_config(gt, vfid); + pf_enter_vf_flr_sync(gt, vfid); return true; } @@ -1167,10 +1770,52 @@ static void pf_enter_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid) */ int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid) { + pf_enter_vf_flr_wip(gt, vfid); + + return 0; +} + +/** + * xe_gt_sriov_pf_control_sync_flr() - Synchronize on the VF FLR checkpoint. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @sync: if true it will allow to exit the checkpoint + * + * Return: non-zero if FLR checkpoint has been reached, zero if the is no FLR + * in progress, or a negative error code on the FLR busy or failed. 
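
The new FLR_SYNC state sits between FLR_GUC_DONE and FLR_RESET_CONFIG so that, on multi-GT devices, no GT starts clearing the VF's resources until every GT's GuC has finished its part of the FLR. The device-level xe_sriov_pf_control_sync_flr() that ties the GTs together is not shown in this hunk; what follows is only a sketch of what such a coordinator could look like, assuming the standard for_each_gt() iterator, with an illustrative release policy:

static int example_sync_flr_across_gts(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt *gt;
	u8 id;
	int ret;

	/* each GT reports > 0 only once it is parked at the FLR_SYNC checkpoint */
	for_each_gt(gt, xe, id) {
		ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, false);
		if (ret <= 0)
			return ret;	/* no FLR in progress, still busy, or failed */
	}

	/* all GTs reached the checkpoint: let each proceed to FLR_RESET_CONFIG */
	for_each_gt(gt, xe, id) {
		ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, true);
		if (ret < 0)
			return ret;
	}

	return 1;
}
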
+ */ +int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync) +{ + if (sync && pf_exit_vf_flr_sync(gt, vfid)) + return 1; + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC)) + return 1; + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) + return -EBUSY; + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED)) + return -EIO; + return 0; +} + +/** + * xe_gt_sriov_pf_control_wait_flr() - Wait for a VF FLR to complete. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_wait_flr(struct xe_gt *gt, unsigned int vfid) +{ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_WIP); int err; - pf_enter_vf_flr_wip(gt, vfid); + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED)) + return -EIO; + + if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) + return 0; err = pf_wait_vf_wip_done(gt, vfid, timeout); if (err) { @@ -1378,7 +2023,22 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid) if (pf_exit_vf_pause_guc_done(gt, vfid)) return true; - if (pf_exit_vf_pause_save_guc(gt, vfid)) + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)) { + xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid, + control_bit_to_string(XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)); + return false; + } + + if (pf_handle_vf_save(gt, vfid)) + return true; + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA)) { + xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid, + control_bit_to_string(XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA)); + return false; + } + + if (pf_handle_vf_restore(gt, vfid)) return true; if (pf_exit_vf_resume_send_resume(gt, vfid)) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h index c85e64f099cc..c36c8767f3ad 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h @@ -16,8 +16,20 @@ void xe_gt_sriov_pf_control_restart(struct xe_gt *gt); int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_control_check_save_data_done(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_control_check_save_failed(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_process_save_data(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_control_check_restore_failed(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync); +int xe_gt_sriov_pf_control_wait_flr(struct xe_gt *gt, unsigned int vfid); #ifdef CONFIG_PCI_IOV int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len); diff --git 
a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h index f02f941b4ad2..6027ba05a7f2 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -18,6 +18,7 @@ * @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command. * @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF awaits for a response from the GuC. * @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC. + * @XE_GT_SRIOV_STATE_FLR_SYNC: indicates that the PF awaits to synchronize with other GuCs. * @XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: indicates that the PF needs to clear VF's resources. * @XE_GT_SRIOV_STATE_FLR_RESET_DATA: indicates that the PF needs to clear VF's data. * @XE_GT_SRIOV_STATE_FLR_RESET_MMIO: indicates that the PF needs to reset VF's registers. @@ -27,9 +28,20 @@ * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command. * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC. * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC. - * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state. * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed. * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused. + * @XE_GT_SRIOV_STATE_SAVE_WIP: indicates that VF save operation is in progress. + * @XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA: indicates that VF migration data is being produced. + * @XE_GT_SRIOV_STATE_SAVE_WAIT_DATA: indicates that PF awaits for space in migration data ring. + * @XE_GT_SRIOV_STATE_SAVE_DATA_DONE: indicates that all migration data was produced by Xe. + * @XE_GT_SRIOV_STATE_SAVE_FAILED: indicates that VF save operation has failed. + * @XE_GT_SRIOV_STATE_SAVED: indicates that VF data is saved. + * @XE_GT_SRIOV_STATE_RESTORE_WIP: indicates that VF restore operation is in progress. + * @XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA: indicates that VF migration data is being consumed. + * @XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA: indicates that PF awaits for data in migration data ring. + * @XE_GT_SRIOV_STATE_RESTORE_DATA_DONE: indicates that all migration data was produced by the user. + * @XE_GT_SRIOV_STATE_RESTORE_FAILED: indicates that VF restore operation has failed. + * @XE_GT_SRIOV_STATE_RESTORED: indicates that VF data is restored. * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates the a VF resume operation is in progress. * @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send RESUME command. * @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed. 
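
One consequence of all the new SAVE_* and RESTORE_* bits shows up in the hunk below: the per-VF state can no longer be assumed to fit into a single unsigned long (the enum now has well over 32 entries), so MISMATCH stops being pinned to BITS_PER_LONG - 1 and the state word becomes a bitmap. A small illustration of what that means for callers, using only the generic bitmap API:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "xe_gt_sriov_pf_control_types.h"

struct example_control_state {
	/* array of unsigned long sized to hold XE_GT_SRIOV_NUM_STATES bits */
	DECLARE_BITMAP(state, XE_GT_SRIOV_NUM_STATES);
};

static bool example_is_paused(struct example_control_state *cs)
{
	/*
	 * test/set/clear_bit() now take the array itself, which is why
	 * pf_peek_vf_state() earlier in this patch returns cs->state
	 * instead of &cs->state.
	 */
	return test_bit(XE_GT_SRIOV_STATE_PAUSED, cs->state);
}
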
@@ -47,6 +59,7 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_FLR_SEND_START, XE_GT_SRIOV_STATE_FLR_WAIT_GUC, XE_GT_SRIOV_STATE_FLR_GUC_DONE, + XE_GT_SRIOV_STATE_FLR_SYNC, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG, XE_GT_SRIOV_STATE_FLR_RESET_DATA, XE_GT_SRIOV_STATE_FLR_RESET_MMIO, @@ -57,10 +70,23 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE, - XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC, XE_GT_SRIOV_STATE_PAUSE_FAILED, XE_GT_SRIOV_STATE_PAUSED, + XE_GT_SRIOV_STATE_SAVE_WIP, + XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA, + XE_GT_SRIOV_STATE_SAVE_WAIT_DATA, + XE_GT_SRIOV_STATE_SAVE_DATA_DONE, + XE_GT_SRIOV_STATE_SAVE_FAILED, + XE_GT_SRIOV_STATE_SAVED, + + XE_GT_SRIOV_STATE_RESTORE_WIP, + XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA, + XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA, + XE_GT_SRIOV_STATE_RESTORE_DATA_DONE, + XE_GT_SRIOV_STATE_RESTORE_FAILED, + XE_GT_SRIOV_STATE_RESTORED, + XE_GT_SRIOV_STATE_RESUME_WIP, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME, XE_GT_SRIOV_STATE_RESUME_FAILED, @@ -71,9 +97,11 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_STOP_FAILED, XE_GT_SRIOV_STATE_STOPPED, - XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1, + XE_GT_SRIOV_STATE_MISMATCH, /* always keep as last */ }; +#define XE_GT_SRIOV_NUM_STATES (XE_GT_SRIOV_STATE_MISMATCH + 1) + /** * struct xe_gt_sriov_control_state - GT-level per-VF control state. * @@ -81,7 +109,7 @@ enum xe_gt_sriov_control_bits { */ struct xe_gt_sriov_control_state { /** @state: VF state bits */ - unsigned long state; + DECLARE_BITMAP(state, XE_GT_SRIOV_NUM_STATES); /** @done: completion of async operations */ struct completion done; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index 3ed245e04d0c..5278ea4fd655 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -23,14 +23,25 @@ #include "xe_gt_sriov_pf_service.h" #include "xe_pm.h" #include "xe_sriov_pf.h" +#include "xe_sriov_pf_provision.h" /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 # d_inode->i_private = gt - * │ ├── pf # d_inode->i_private = gt - * │ ├── vf1 # d_inode->i_private = VFID(1) - * : : - * │ ├── vfN # d_inode->i_private = VFID(N) + * /sys/kernel/debug/dri/BDF/ + * ├── sriov # d_inode->i_private = (xe_device*) + * │ ├── pf # d_inode->i_private = (xe_device*) + * │ │ ├── tile0 # d_inode->i_private = (xe_tile*) + * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*) + * │ │ │ ├── gt1 # d_inode->i_private = (xe_gt*) + * │ │ ├── tile1 + * │ │ │ : + * │ ├── vf1 # d_inode->i_private = VFID(1) + * │ │ ├── tile0 # d_inode->i_private = (xe_tile*) + * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*) + * │ │ │ ├── gt1 # d_inode->i_private = (xe_gt*) + * │ │ ├── tile1 + * │ │ │ : + * : : + * │ ├── vfN # d_inode->i_private = VFID(N) */ static void *extract_priv(struct dentry *d) @@ -40,26 +51,31 @@ static void *extract_priv(struct dentry *d) static struct xe_gt *extract_gt(struct dentry *d) { - return extract_priv(d->d_parent); + return extract_priv(d); +} + +static struct xe_device *extract_xe(struct dentry *d) +{ + return extract_priv(d->d_parent->d_parent->d_parent); } static unsigned int extract_vfid(struct dentry *d) { - return extract_priv(d) == extract_gt(d) ? PFID : (uintptr_t)extract_priv(d); + void *priv = extract_priv(d->d_parent->d_parent); + + return priv == extract_xe(d) ? 
PFID : (uintptr_t)priv; } /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── pf - * │ │ ├── contexts_provisioned - * │ │ ├── doorbells_provisioned - * │ │ ├── runtime_registers - * │ │ ├── negotiated_versions - * │ │ ├── adverse_events - * ├── gt1 - * │ ├── pf - * │ │ ├── ... + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * : ├── pf + * : ├── tile0 + * : ├── gt0 + * : ├── contexts_provisioned + * ├── doorbells_provisioned + * ├── runtime_registers + * ├── adverse_events */ static const struct drm_info_list pf_info[] = { @@ -86,48 +102,14 @@ static const struct drm_info_list pf_info[] = { }; /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── pf - * │ │ ├── ggtt_available - * │ │ ├── ggtt_provisioned - */ - -static const struct drm_info_list pf_ggtt_info[] = { - { - "ggtt_available", - .show = xe_gt_debugfs_simple_show, - .data = xe_gt_sriov_pf_config_print_available_ggtt, - }, - { - "ggtt_provisioned", - .show = xe_gt_debugfs_simple_show, - .data = xe_gt_sriov_pf_config_print_ggtt, - }, -}; - -/* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── pf - * │ │ ├── lmem_provisioned - */ - -static const struct drm_info_list pf_lmem_info[] = { - { - "lmem_provisioned", - .show = xe_gt_debugfs_simple_show, - .data = xe_gt_sriov_pf_config_print_lmem, - }, -}; - -/* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── pf - * │ │ ├── reset_engine - * │ │ ├── sample_period - * │ │ ├── sched_if_idle + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * : ├── pf + * : ├── tile0 + * : ├── gt0 + * : ├── reset_engine + * ├── sample_period + * ├── sched_if_idle */ #define DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(POLICY, TYPE, FORMAT) \ @@ -143,6 +125,8 @@ static int POLICY##_set(void *data, u64 val) \ \ xe_pm_runtime_get(xe); \ err = xe_gt_sriov_pf_policy_set_##POLICY(gt, val); \ + if (!err) \ + xe_sriov_pf_provision_set_custom_mode(xe); \ xe_pm_runtime_put(xe); \ \ return err; \ @@ -173,24 +157,24 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent) } /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── pf - * │ │ ├── ggtt_spare - * │ │ ├── lmem_spare - * │ │ ├── doorbells_spare - * │ │ ├── contexts_spare - * │ │ ├── exec_quantum_ms - * │ │ ├── preempt_timeout_us - * │ │ ├── sched_priority - * │ ├── vf1 - * │ │ ├── ggtt_quota - * │ │ ├── lmem_quota - * │ │ ├── doorbells_quota - * │ │ ├── contexts_quota - * │ │ ├── exec_quantum_ms - * │ │ ├── preempt_timeout_us - * │ │ ├── sched_priority + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * : ├── pf + * │ ├── tile0 + * │ : ├── gt0 + * │ : ├── doorbells_spare + * │ ├── contexts_spare + * │ ├── exec_quantum_ms + * │ ├── preempt_timeout_us + * │ ├── sched_priority + * ├── vf1 + * : ├── tile0 + * : ├── gt0 + * : ├── doorbells_quota + * ├── contexts_quota + * ├── exec_quantum_ms + * ├── preempt_timeout_us + * ├── sched_priority */ #define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \ @@ -208,6 +192,8 @@ static int CONFIG##_set(void *data, u64 val) \ xe_pm_runtime_get(xe); \ err = xe_sriov_pf_wait_ready(xe) ?: \ xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \ + if (!err) \ + xe_sriov_pf_provision_set_custom_mode(xe); \ xe_pm_runtime_put(xe); \ \ return err; \ @@ -224,8 +210,6 @@ static int CONFIG##_get(void *data, u64 *val) \ \ DEFINE_DEBUGFS_ATTRIBUTE(CONFIG##_fops, CONFIG##_get, CONFIG##_set, FORMAT) -DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ggtt, u64, "%llu\n"); -DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(lmem, u64, "%llu\n"); DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n"); 
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n"); DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n"); @@ -233,22 +217,26 @@ DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n"); DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(sched_priority, u32, "%llu\n"); /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── pf - * │ │ ├── threshold_cat_error_count - * │ │ ├── threshold_doorbell_time_us - * │ │ ├── threshold_engine_reset_count - * │ │ ├── threshold_guc_time_us - * │ │ ├── threshold_irq_time_us - * │ │ ├── threshold_page_fault_count - * │ ├── vf1 - * │ │ ├── threshold_cat_error_count - * │ │ ├── threshold_doorbell_time_us - * │ │ ├── threshold_engine_reset_count - * │ │ ├── threshold_guc_time_us - * │ │ ├── threshold_irq_time_us - * │ │ ├── threshold_page_fault_count + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * : ├── pf + * │ ├── tile0 + * │ : ├── gt0 + * │ : ├── threshold_cat_error_count + * │ ├── threshold_doorbell_time_us + * │ ├── threshold_engine_reset_count + * │ ├── threshold_guc_time_us + * │ ├── threshold_irq_time_us + * │ ├── threshold_page_fault_count + * ├── vf1 + * : ├── tile0 + * : ├── gt0 + * : ├── threshold_cat_error_count + * ├── threshold_doorbell_time_us + * ├── threshold_engine_reset_count + * ├── threshold_guc_time_us + * ├── threshold_irq_time_us + * ├── threshold_page_fault_count */ static int set_threshold(void *data, u64 val, enum xe_guc_klv_threshold_index index) @@ -263,6 +251,8 @@ static int set_threshold(void *data, u64 val, enum xe_guc_klv_threshold_index in xe_pm_runtime_get(xe); err = xe_gt_sriov_pf_config_set_threshold(gt, vfid, index, val); + if (!err) + xe_sriov_pf_provision_set_custom_mode(xe); xe_pm_runtime_put(xe); return err; @@ -302,13 +292,6 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne xe_gt_assert(gt, gt == extract_gt(parent)); xe_gt_assert(gt, vfid == extract_vfid(parent)); - if (xe_gt_is_main_type(gt)) { - debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare", - 0644, parent, parent, &ggtt_fops); - if (xe_device_has_lmtt(gt_to_xe(gt))) - debugfs_create_file_unsafe(vfid ? "lmem_quota" : "lmem_spare", - 0644, parent, parent, &lmem_fops); - } debugfs_create_file_unsafe(vfid ? "doorbells_quota" : "doorbells_spare", 0644, parent, parent, &dbs_fops); debugfs_create_file_unsafe(vfid ? 
"contexts_quota" : "contexts_spare", @@ -329,10 +312,12 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne } /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── vf1 - * │ │ ├── control { stop, pause, resume } + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * : ├── vf1 + * : ├── tile0 + * : ├── gt0 + * : ├── control { stop, pause, resume } */ static const struct { @@ -342,9 +327,6 @@ static const struct { { "stop", xe_gt_sriov_pf_control_stop_vf }, { "pause", xe_gt_sriov_pf_control_pause_vf }, { "resume", xe_gt_sriov_pf_control_resume_vf }, -#ifdef CONFIG_DRM_XE_DEBUG_SRIOV - { "restore!", xe_gt_sriov_pf_migration_restore_guc_state }, -#endif }; static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) @@ -409,58 +391,27 @@ static const struct file_operations control_ops = { }; /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── vf1 - * │ │ ├── guc_state + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * : ├── vf1 + * : ├── tile0 + * : ├── gt0 + * : ├── config_blob */ -static ssize_t guc_state_read(struct file *file, char __user *buf, - size_t count, loff_t *pos) -{ - struct dentry *dent = file_dentry(file); - struct dentry *parent = dent->d_parent; - struct xe_gt *gt = extract_gt(parent); - unsigned int vfid = extract_vfid(parent); - return xe_gt_sriov_pf_migration_read_guc_state(gt, vfid, buf, count, pos); -} - -static ssize_t guc_state_write(struct file *file, const char __user *buf, - size_t count, loff_t *pos) -{ - struct dentry *dent = file_dentry(file); - struct dentry *parent = dent->d_parent; - struct xe_gt *gt = extract_gt(parent); - unsigned int vfid = extract_vfid(parent); - - if (*pos) - return -EINVAL; - - return xe_gt_sriov_pf_migration_write_guc_state(gt, vfid, buf, count); -} - -static const struct file_operations guc_state_ops = { - .owner = THIS_MODULE, - .read = guc_state_read, - .write = guc_state_write, - .llseek = default_llseek, +struct config_blob_data { + size_t size; + u8 blob[]; }; -/* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── vf1 - * │ │ ├── config_blob - */ -static ssize_t config_blob_read(struct file *file, char __user *buf, - size_t count, loff_t *pos) +static int config_blob_open(struct inode *inode, struct file *file) { struct dentry *dent = file_dentry(file); struct dentry *parent = dent->d_parent; struct xe_gt *gt = extract_gt(parent); unsigned int vfid = extract_vfid(parent); + struct config_blob_data *cbd; ssize_t ret; - void *tmp; ret = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0); if (!ret) @@ -468,16 +419,27 @@ static ssize_t config_blob_read(struct file *file, char __user *buf, if (ret < 0) return ret; - tmp = kzalloc(ret, GFP_KERNEL); - if (!tmp) + cbd = kzalloc(struct_size(cbd, blob, ret), GFP_KERNEL); + if (!cbd) return -ENOMEM; - ret = xe_gt_sriov_pf_config_save(gt, vfid, tmp, ret); - if (ret > 0) - ret = simple_read_from_buffer(buf, count, pos, tmp, ret); + ret = xe_gt_sriov_pf_config_save(gt, vfid, cbd->blob, ret); + if (ret < 0) { + kfree(cbd); + return ret; + } + + cbd->size = ret; + file->private_data = cbd; + return nonseekable_open(inode, file); +} - kfree(tmp); - return ret; +static ssize_t config_blob_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct config_blob_data *cbd = file->private_data; + + return simple_read_from_buffer(buf, count, pos, cbd->blob, cbd->size); } static ssize_t config_blob_write(struct file *file, const char __user *buf, @@ -514,80 +476,147 @@ static ssize_t config_blob_write(struct file *file, 
const char __user *buf, return ret; } +static int config_blob_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + static const struct file_operations config_blob_ops = { .owner = THIS_MODULE, + .open = config_blob_open, .read = config_blob_read, .write = config_blob_write, - .llseek = default_llseek, + .release = config_blob_release, }; -/** - * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs. - * @gt: the &xe_gt to register - * @root: the &dentry that represents the GT directory - * - * Register SR-IOV PF entries that are GT related and must be shown under GT debugfs. - */ -void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) +static void pf_add_compat_attrs(struct xe_gt *gt, struct dentry *dent, unsigned int vfid) { struct xe_device *xe = gt_to_xe(gt); - struct drm_minor *minor = xe->drm.primary; - int n, totalvfs = xe_sriov_pf_get_totalvfs(xe); - struct dentry *pfdentry; - struct dentry *vfdentry; - char buf[14]; /* should be enough up to "vf%u\0" for 2^32 - 1 */ - - xe_gt_assert(gt, IS_SRIOV_PF(xe)); - xe_gt_assert(gt, root->d_inode->i_private == gt); - /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── pf - */ - pfdentry = debugfs_create_dir("pf", root); - if (IS_ERR(pfdentry)) + if (!xe_gt_is_main_type(gt)) return; - pfdentry->d_inode->i_private = gt; - - drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor); - if (xe_gt_is_main_type(gt)) { - drm_debugfs_create_files(pf_ggtt_info, - ARRAY_SIZE(pf_ggtt_info), - pfdentry, minor); - if (xe_device_has_lmtt(gt_to_xe(gt))) - drm_debugfs_create_files(pf_lmem_info, - ARRAY_SIZE(pf_lmem_info), - pfdentry, minor); + + if (vfid) { + debugfs_create_symlink("ggtt_quota", dent, "../ggtt_quota"); + if (xe_device_has_lmtt(xe)) + debugfs_create_symlink("lmem_quota", dent, "../vram_quota"); + } else { + debugfs_create_symlink("ggtt_spare", dent, "../ggtt_spare"); + debugfs_create_symlink("ggtt_available", dent, "../ggtt_available"); + debugfs_create_symlink("ggtt_provisioned", dent, "../ggtt_provisioned"); + if (xe_device_has_lmtt(xe)) { + debugfs_create_symlink("lmem_spare", dent, "../vram_spare"); + debugfs_create_symlink("lmem_provisioned", dent, "../vram_provisioned"); + } } +} - pf_add_policy_attrs(gt, pfdentry); - pf_add_config_attrs(gt, pfdentry, PFID); - - for (n = 1; n <= totalvfs; n++) { - /* - * /sys/kernel/debug/dri/0/ - * ├── gt0 - * │ ├── vf1 - * │ ├── vf2 - */ - snprintf(buf, sizeof(buf), "vf%u", n); - vfdentry = debugfs_create_dir(buf, root); - if (IS_ERR(vfdentry)) - break; - vfdentry->d_inode->i_private = (void *)(uintptr_t)n; +static void pf_populate_gt(struct xe_gt *gt, struct dentry *dent, unsigned int vfid) +{ + struct xe_device *xe = gt_to_xe(gt); + struct drm_minor *minor = xe->drm.primary; - pf_add_config_attrs(gt, vfdentry, VFID(n)); - debugfs_create_file("control", 0600, vfdentry, NULL, &control_ops); + if (vfid) { + pf_add_config_attrs(gt, dent, vfid); + + debugfs_create_file("control", 0600, dent, NULL, &control_ops); /* for testing/debugging purposes only! */ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { - debugfs_create_file("guc_state", - IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400, - vfdentry, NULL, &guc_state_ops); debugfs_create_file("config_blob", IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 
0600 : 0400, - vfdentry, NULL, &config_blob_ops); + dent, NULL, &config_blob_ops); } + + } else { + pf_add_config_attrs(gt, dent, PFID); + pf_add_policy_attrs(gt, dent); + + drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), dent, minor); } + + /* for backward compatibility only */ + pf_add_compat_attrs(gt, dent, vfid); +} + +/** + * xe_gt_sriov_pf_debugfs_populate() - Create SR-IOV GT-level debugfs directories and files. + * @gt: the &xe_gt to register + * @parent: the parent &dentry that represents a &xe_tile + * @vfid: the VF identifier + * + * Add to the @parent directory new debugfs directory that will represent a @gt and + * populate it with GT files that are related to the SR-IOV @vfid function. + * + * This function can only be called on PF. + */ +void xe_gt_sriov_pf_debugfs_populate(struct xe_gt *gt, struct dentry *parent, unsigned int vfid) +{ + struct dentry *dent; + char name[8]; /* should be enough up to "gt%u\0" for 2^8 - 1 */ + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, extract_priv(parent) == gt->tile); + xe_gt_assert(gt, extract_priv(parent->d_parent) == gt_to_xe(gt) || + (uintptr_t)extract_priv(parent->d_parent) == vfid); + + /* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * │ ├── pf + * │ │ ├── tile0 # parent + * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*) + * │ │ │ ├── gt1 + * │ │ : : + * │ ├── vf1 + * │ │ ├── tile0 # parent + * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*) + * │ │ │ ├── gt1 + * │ : : : + */ + snprintf(name, sizeof(name), "gt%u", gt->info.id); + dent = debugfs_create_dir(name, parent); + if (IS_ERR(dent)) + return; + dent->d_inode->i_private = gt; + + xe_gt_assert(gt, extract_gt(dent) == gt); + xe_gt_assert(gt, extract_vfid(dent) == vfid); + + pf_populate_gt(gt, dent, vfid); +} + +static void pf_add_links(struct xe_gt *gt, struct dentry *dent) +{ + unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt); + unsigned int vfid; + char name[16]; /* should be more than enough for "vf%u\0" and VFID(UINT_MAX) */ + char symlink[64]; /* should be more enough for "../../sriov/vf%u/tile%u/gt%u\0" */ + + for (vfid = 0; vfid <= totalvfs; vfid++) { + if (vfid) + snprintf(name, sizeof(name), "vf%u", vfid); + else + snprintf(name, sizeof(name), "pf"); + snprintf(symlink, sizeof(symlink), "../../sriov/%s/tile%u/gt%u", + name, gt->tile->id, gt->info.id); + debugfs_create_symlink(name, dent, symlink); + } +} + +/** + * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs. + * @gt: the &xe_gt to register + * @dent: the &dentry that represents the GT directory + * + * Instead of actual files, create symlinks for PF and each VF to their GT specific + * attributes that should be already exposed in the dedicated debugfs SR-IOV tree. 
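
With pf_add_links() in place, the legacy per-GT debugfs directories no longer hold real attributes; they only point back into the new per-device sriov tree via the "../../sriov/%s/tile%u/gt%u" targets built above. Assuming the GT directory sits two levels below the device directory (as those relative targets imply), a hypothetical single-tile device with two VFs enabled would end up with roughly:

 /sys/kernel/debug/dri/BDF/
 ├── tile0
 │   └── gt0
 │       ├── pf  -> ../../sriov/pf/tile0/gt0
 │       ├── vf1 -> ../../sriov/vf1/tile0/gt0
 │       └── vf2 -> ../../sriov/vf2/tile0/gt0
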
+ */ +void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *dent) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, dent->d_inode->i_private == gt); + + pf_add_links(gt, dent); } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h index 038cc8ddc244..82ff3b7f0532 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h @@ -11,6 +11,7 @@ struct dentry; #ifdef CONFIG_PCI_IOV void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root); +void xe_gt_sriov_pf_debugfs_populate(struct xe_gt *gt, struct dentry *parent, unsigned int vfid); #else static inline void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) { } #endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index 44cc612b0a75..d5d918ddce4f 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -5,14 +5,149 @@ #include <drm/drm_managed.h> +#include "regs/xe_guc_regs.h" + #include "abi/guc_actions_sriov_abi.h" #include "xe_bo.h" +#include "xe_ggtt.h" +#include "xe_gt.h" +#include "xe_gt_sriov_pf.h" +#include "xe_gt_sriov_pf_config.h" +#include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" #include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_printk.h" -#include "xe_guc.h" +#include "xe_guc_buf.h" #include "xe_guc_ct.h" +#include "xe_migrate.h" +#include "xe_mmio.h" #include "xe_sriov.h" +#include "xe_sriov_packet.h" +#include "xe_sriov_packet_types.h" +#include "xe_sriov_pf_migration.h" + +#define XE_GT_SRIOV_PF_MIGRATION_RING_SIZE 5 + +static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return >->sriov.pf.vfs[vfid].migration; +} + +static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data, + const char *what) +{ + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { + struct drm_printer p = xe_gt_dbg_printer(gt); + + drm_printf(&p, "VF%u %s (%llu bytes)\n", vfid, what, data->hdr.size); + drm_print_hex_dump(&p, "mig_hdr: ", (void *)&data->hdr, sizeof(data->hdr)); + drm_print_hex_dump(&p, "mig_data: ", data->vaddr, min(SZ_64, data->hdr.size)); + } +} + +static ssize_t pf_migration_ggtt_size(struct xe_gt *gt, unsigned int vfid) +{ + if (!xe_gt_is_main_type(gt)) + return 0; + + return xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0); +} + +static int pf_save_vf_ggtt_mig_data(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_sriov_packet *data; + size_t size; + int ret; + + size = pf_migration_ggtt_size(gt, vfid); + xe_gt_assert(gt, size); + + data = xe_sriov_packet_alloc(gt_to_xe(gt)); + if (!data) + return -ENOMEM; + + ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id, + XE_SRIOV_PACKET_TYPE_GGTT, 0, size); + if (ret) + goto fail; + + ret = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, data->vaddr, size); + if (ret) + goto fail; + + pf_dump_mig_data(gt, vfid, data, "GGTT data save"); + + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data); + if (ret) + goto fail; + + return 0; + +fail: + xe_sriov_packet_free(data); + xe_gt_sriov_err(gt, "Failed to save VF%u GGTT data (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +static int pf_restore_vf_ggtt_mig_data(struct xe_gt *gt, unsigned 
int vfid, + struct xe_sriov_packet *data) +{ + int ret; + + pf_dump_mig_data(gt, vfid, data, "GGTT data restore"); + + ret = xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, data->vaddr, data->hdr.size); + if (ret) { + xe_gt_sriov_err(gt, "Failed to restore VF%u GGTT data (%pe)\n", + vfid, ERR_PTR(ret)); + return ret; + } + + return 0; +} + +/** + * xe_gt_sriov_pf_migration_ggtt_save() - Save VF GGTT migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_save_vf_ggtt_mig_data(gt, vfid); +} + +/** + * xe_gt_sriov_pf_migration_ggtt_restore() - Restore VF GGTT migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * @data: the &xe_sriov_packet containing migration data + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_restore_vf_ggtt_mig_data(gt, vfid, data); +} /* Return: number of dwords saved/restored/required or a negative error code on failure */ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, @@ -33,7 +168,7 @@ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, } /* Return: size of the state in dwords or a negative error code on failure */ -static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid) +static int pf_send_guc_query_vf_mig_data_size(struct xe_gt *gt, unsigned int vfid) { int ret; @@ -42,353 +177,850 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid) } /* Return: number of state dwords saved or a negative error code on failure */ -static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid, - void *buff, size_t size) +static int pf_send_guc_save_vf_mig_data(struct xe_gt *gt, unsigned int vfid, + void *dst, size_t size) { const int ndwords = size / sizeof(u32); - struct xe_tile *tile = gt_to_tile(gt); - struct xe_device *xe = tile_to_xe(tile); struct xe_guc *guc = >->uc.guc; - struct xe_bo *bo; + CLASS(xe_guc_buf, buf)(&guc->buf, ndwords); int ret; xe_gt_assert(gt, size % sizeof(u32) == 0); xe_gt_assert(gt, size == ndwords * sizeof(u32)); - bo = xe_bo_create_pin_map_novm(xe, tile, - ALIGN(size, PAGE_SIZE), - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE, false); - if (IS_ERR(bo)) - return PTR_ERR(bo); + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; + + /* FW expects this buffer to be zero-initialized */ + memset(xe_guc_buf_cpu_ptr(buf), 0, size); ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE, - xe_bo_ggtt_addr(bo), ndwords); + xe_guc_buf_flush(buf), ndwords); if (!ret) ret = -ENODATA; else if (ret > ndwords) ret = -EPROTO; else if (ret > 0) - xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32)); + memcpy(dst, xe_guc_buf_sync_read(buf), ret * sizeof(u32)); - xe_bo_unpin_map_no_vm(bo); return ret; } /* Return: number of state dwords restored or a negative 
error code on failure */ -static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid, - const void *buff, size_t size) +static int pf_send_guc_restore_vf_mig_data(struct xe_gt *gt, unsigned int vfid, + const void *src, size_t size) { const int ndwords = size / sizeof(u32); - struct xe_tile *tile = gt_to_tile(gt); - struct xe_device *xe = tile_to_xe(tile); struct xe_guc *guc = >->uc.guc; - struct xe_bo *bo; + CLASS(xe_guc_buf_from_data, buf)(&guc->buf, src, size); int ret; xe_gt_assert(gt, size % sizeof(u32) == 0); xe_gt_assert(gt, size == ndwords * sizeof(u32)); - bo = xe_bo_create_pin_map_novm(xe, tile, - ALIGN(size, PAGE_SIZE), - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE, false); - if (IS_ERR(bo)) - return PTR_ERR(bo); - - xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size); + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE, - xe_bo_ggtt_addr(bo), ndwords); + xe_guc_buf_flush(buf), ndwords); if (!ret) ret = -ENODATA; else if (ret > ndwords) ret = -EPROTO; - xe_bo_unpin_map_no_vm(bo); return ret; } static bool pf_migration_supported(struct xe_gt *gt) { - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); - return gt->sriov.pf.migration.supported; + return xe_sriov_pf_migration_supported(gt_to_xe(gt)); } -static struct mutex *pf_migration_mutex(struct xe_gt *gt) +static int pf_save_vf_guc_mig_data(struct xe_gt *gt, unsigned int vfid) { - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); - return >->sriov.pf.migration.snapshot_lock; + struct xe_sriov_packet *data; + size_t size; + int ret; + + ret = pf_send_guc_query_vf_mig_data_size(gt, vfid); + if (ret < 0) + goto fail; + + size = ret * sizeof(u32); + + data = xe_sriov_packet_alloc(gt_to_xe(gt)); + if (!data) { + ret = -ENOMEM; + goto fail; + } + + ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id, + XE_SRIOV_PACKET_TYPE_GUC, 0, size); + if (ret) + goto fail_free; + + ret = pf_send_guc_save_vf_mig_data(gt, vfid, data->vaddr, size); + if (ret < 0) + goto fail_free; + size = ret * sizeof(u32); + xe_gt_assert(gt, size); + xe_gt_assert(gt, size <= data->hdr.size); + data->hdr.size = size; + data->remaining = size; + + pf_dump_mig_data(gt, vfid, data, "GuC data save"); + + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data); + if (ret) + goto fail_free; + + return 0; + +fail_free: + xe_sriov_packet_free(data); +fail: + xe_gt_sriov_err(gt, "Failed to save VF%u GuC data (%pe)\n", + vfid, ERR_PTR(ret)); + return ret; } -static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt, - unsigned int vfid) +static ssize_t pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid) +{ + ssize_t size; + + if (!pf_migration_supported(gt)) + return -ENOPKG; + + size = pf_send_guc_query_vf_mig_data_size(gt, vfid); + if (size >= 0) + size *= sizeof(u32); + + return size; +} + +/** + * xe_gt_sriov_pf_migration_guc_save() - Save VF GuC migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid) { xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); - lockdep_assert_held(pf_migration_mutex(gt)); - return >->sriov.pf.vfs[vfid].snapshot; + if (!pf_migration_supported(gt)) + return -ENOPKG; + + return pf_save_vf_guc_mig_data(gt, vfid); } -static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) { - return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs; + int ret; + + xe_gt_assert(gt, data->hdr.size); + + pf_dump_mig_data(gt, vfid, data, "GuC data restore"); + + ret = pf_send_guc_restore_vf_mig_data(gt, vfid, data->vaddr, data->hdr.size); + if (ret < 0) + goto fail; + + return 0; + +fail: + xe_gt_sriov_err(gt, "Failed to restore VF%u GuC data (%pe)\n", + vfid, ERR_PTR(ret)); + return ret; } -static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +/** + * xe_gt_sriov_pf_migration_guc_restore() - Restore VF GuC migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @data: the &xe_sriov_packet containing migration data + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) { - struct xe_device *xe = gt_to_xe(gt); + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); - drmm_kfree(&xe->drm, snapshot->guc.buff); - snapshot->guc.buff = NULL; - snapshot->guc.size = 0; + if (!pf_migration_supported(gt)) + return -ENOPKG; + + return pf_restore_vf_guc_state(gt, vfid, data); } -static int pf_alloc_guc_state(struct xe_gt *gt, - struct xe_gt_sriov_state_snapshot *snapshot, - size_t size) +static ssize_t pf_migration_mmio_size(struct xe_gt *gt, unsigned int vfid) { - struct xe_device *xe = gt_to_xe(gt); - void *p; - - pf_free_guc_state(gt, snapshot); + if (xe_gt_is_media_type(gt)) + return MED_VF_SW_FLAG_COUNT * sizeof(u32); + else + return VF_SW_FLAG_COUNT * sizeof(u32); +} - if (!size) - return -ENODATA; +static int pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size) +{ + struct xe_mmio mmio; + u32 *regs = buf; + int n; - if (size % sizeof(u32)) + if (size != pf_migration_mmio_size(gt, vfid)) return -EINVAL; - if (size > SZ_2M) - return -EFBIG; + xe_mmio_init_vf_view(&mmio, >->mmio, vfid); - p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL); - if (!p) - return -ENOMEM; + if (xe_gt_is_media_type(gt)) + for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++) + regs[n] = xe_mmio_read32(>->mmio, MED_VF_SW_FLAG(n)); + else + for (n = 0; n < VF_SW_FLAG_COUNT; n++) + regs[n] = xe_mmio_read32(>->mmio, VF_SW_FLAG(n)); - snapshot->guc.buff = p; - snapshot->guc.size = size; return 0; } -static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +static int pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size) { - if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { - unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot); + const u32 *regs = buf; + struct xe_mmio mmio; + int n; - xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n", - vfid, 
snapshot->guc.size / sizeof(u32)); - print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET, - snapshot->guc.buff, min(SZ_64, snapshot->guc.size)); - } + if (size != pf_migration_mmio_size(gt, vfid)) + return -EINVAL; + + xe_mmio_init_vf_view(&mmio, >->mmio, vfid); + + if (xe_gt_is_media_type(gt)) + for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++) + xe_mmio_write32(>->mmio, MED_VF_SW_FLAG(n), regs[n]); + else + for (n = 0; n < VF_SW_FLAG_COUNT; n++) + xe_mmio_write32(>->mmio, VF_SW_FLAG(n), regs[n]); + + return 0; } -static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid) +static int pf_save_vf_mmio_mig_data(struct xe_gt *gt, unsigned int vfid) { - struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid); + struct xe_sriov_packet *data; size_t size; int ret; - ret = pf_send_guc_query_vf_state_size(gt, vfid); - if (ret < 0) + size = pf_migration_mmio_size(gt, vfid); + xe_gt_assert(gt, size); + + data = xe_sriov_packet_alloc(gt_to_xe(gt)); + if (!data) + return -ENOMEM; + + ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id, + XE_SRIOV_PACKET_TYPE_MMIO, 0, size); + if (ret) goto fail; - size = ret * sizeof(u32); - xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size); - ret = pf_alloc_guc_state(gt, snapshot, size); - if (ret < 0) + ret = pf_migration_mmio_save(gt, vfid, data->vaddr, size); + if (ret) goto fail; - ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size); - if (ret < 0) + pf_dump_mig_data(gt, vfid, data, "MMIO data save"); + + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data); + if (ret) goto fail; - size = ret * sizeof(u32); - xe_gt_assert(gt, size); - xe_gt_assert(gt, size <= snapshot->guc.size); - snapshot->guc.size = size; - pf_dump_guc_state(gt, snapshot); return 0; fail: - xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret)); - pf_free_guc_state(gt, snapshot); + xe_sriov_packet_free(data); + xe_gt_sriov_err(gt, "Failed to save VF%u MMIO data (%pe)\n", vfid, ERR_PTR(ret)); return ret; } +static int pf_restore_vf_mmio_mig_data(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) +{ + int ret; + + pf_dump_mig_data(gt, vfid, data, "MMIO data restore"); + + ret = pf_migration_mmio_restore(gt, vfid, data->vaddr, data->hdr.size); + if (ret) { + xe_gt_sriov_err(gt, "Failed to restore VF%u MMIO data (%pe)\n", + vfid, ERR_PTR(ret)); + + return ret; + } + + return 0; +} + /** - * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot. + * xe_gt_sriov_pf_migration_mmio_save() - Save VF MMIO migration data. * @gt: the &xe_gt - * @vfid: the VF identifier + * @vfid: the VF identifier (can't be 0) * * This function is for PF only. * * Return: 0 on success or a negative error code on failure. */ -int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid) +int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid) { - int err; + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_save_vf_mmio_mig_data(gt, vfid); +} +/** + * xe_gt_sriov_pf_migration_mmio_restore() - Restore VF MMIO migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * @data: the &xe_sriov_packet containing migration data + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) +{ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); xe_gt_assert(gt, vfid != PFID); xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); - if (!pf_migration_supported(gt)) - return -ENOPKG; + return pf_restore_vf_mmio_mig_data(gt, vfid, data); +} + +static ssize_t pf_migration_vram_size(struct xe_gt *gt, unsigned int vfid) +{ + if (!xe_gt_is_main_type(gt)) + return 0; + + return xe_gt_sriov_pf_config_get_lmem(gt, vfid); +} + +static struct dma_fence *__pf_save_restore_vram(struct xe_gt *gt, unsigned int vfid, + struct xe_bo *vram, u64 vram_offset, + struct xe_bo *sysmem, u64 sysmem_offset, + size_t size, bool save) +{ + struct dma_fence *ret = NULL; + struct drm_exec exec; + int err; + + drm_exec_init(&exec, 0, 0); + drm_exec_until_all_locked(&exec) { + err = drm_exec_lock_obj(&exec, &vram->ttm.base); + drm_exec_retry_on_contention(&exec); + if (err) { + ret = ERR_PTR(err); + goto err; + } + + err = drm_exec_lock_obj(&exec, &sysmem->ttm.base); + drm_exec_retry_on_contention(&exec); + if (err) { + ret = ERR_PTR(err); + goto err; + } + } + + ret = xe_migrate_vram_copy_chunk(vram, vram_offset, sysmem, sysmem_offset, size, + save ? XE_MIGRATE_COPY_TO_SRAM : XE_MIGRATE_COPY_TO_VRAM); - mutex_lock(pf_migration_mutex(gt)); - err = pf_save_vf_guc_state(gt, vfid); - mutex_unlock(pf_migration_mutex(gt)); +err: + drm_exec_fini(&exec); - return err; + return ret; } -static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid) +#define PF_VRAM_SAVE_RESTORE_TIMEOUT (5 * HZ) +static int pf_save_vram_chunk(struct xe_gt *gt, unsigned int vfid, + struct xe_bo *src_vram, u64 src_vram_offset, + size_t size) { - struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid); + struct xe_sriov_packet *data; + struct dma_fence *fence; int ret; - if (!snapshot->guc.size) - return -ENODATA; + data = xe_sriov_packet_alloc(gt_to_xe(gt)); + if (!data) + return -ENOMEM; - xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n", - snapshot->guc.size / sizeof(u32), vfid); - ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size); - if (ret < 0) + ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id, + XE_SRIOV_PACKET_TYPE_VRAM, src_vram_offset, + size); + if (ret) + goto fail; + + fence = __pf_save_restore_vram(gt, vfid, + src_vram, src_vram_offset, + data->bo, 0, size, true); + if (IS_ERR(fence)) { + ret = PTR_ERR(fence); + goto fail; + } + + ret = dma_fence_wait_timeout(fence, false, PF_VRAM_SAVE_RESTORE_TIMEOUT); + dma_fence_put(fence); + if (!ret) { + ret = -ETIME; + goto fail; + } + + pf_dump_mig_data(gt, vfid, data, "VRAM data save"); + + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data); + if (ret) goto fail; - xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid); return 0; fail: - xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret)); + xe_sriov_packet_free(data); + return ret; +} + +#define VF_VRAM_STATE_CHUNK_MAX_SIZE SZ_512M +static int pf_save_vf_vram_mig_data(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + loff_t *offset = &migration->save.vram_offset; + struct xe_bo *vram; + size_t vram_size, chunk_size; + int ret; + + vram = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid); + if (!vram) + return -ENXIO; + + vram_size = xe_bo_size(vram); + + xe_gt_assert(gt, *offset < 
vram_size); + + chunk_size = min(vram_size - *offset, VF_VRAM_STATE_CHUNK_MAX_SIZE); + + ret = pf_save_vram_chunk(gt, vfid, vram, *offset, chunk_size); + if (ret) + goto fail; + + *offset += chunk_size; + + xe_bo_put(vram); + + if (*offset < vram_size) + return -EAGAIN; + + return 0; + +fail: + xe_bo_put(vram); + xe_gt_sriov_err(gt, "Failed to save VF%u VRAM data (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +static int pf_restore_vf_vram_mig_data(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) +{ + u64 end = data->hdr.offset + data->hdr.size; + struct dma_fence *fence; + struct xe_bo *vram; + size_t size; + int ret = 0; + + vram = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid); + if (!vram) + return -ENXIO; + + size = xe_bo_size(vram); + + if (end > size || end < data->hdr.size) { + ret = -EINVAL; + goto err; + } + + pf_dump_mig_data(gt, vfid, data, "VRAM data restore"); + + fence = __pf_save_restore_vram(gt, vfid, vram, data->hdr.offset, + data->bo, 0, data->hdr.size, false); + if (IS_ERR(fence)) { + ret = PTR_ERR(fence); + goto err; + } + + ret = dma_fence_wait_timeout(fence, false, PF_VRAM_SAVE_RESTORE_TIMEOUT); + dma_fence_put(fence); + if (!ret) { + ret = -ETIME; + goto err; + } + + xe_bo_put(vram); + + return 0; +err: + xe_bo_put(vram); + xe_gt_sriov_err(gt, "Failed to restore VF%u VRAM data (%pe)\n", vfid, ERR_PTR(ret)); return ret; } /** - * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state. + * xe_gt_sriov_pf_migration_vram_save() - Save VF VRAM migration data. * @gt: the &xe_gt - * @vfid: the VF identifier + * @vfid: the VF identifier (can't be 0) * * This function is for PF only. * * Return: 0 on success or a negative error code on failure. */ -int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid) +int xe_gt_sriov_pf_migration_vram_save(struct xe_gt *gt, unsigned int vfid) { - int ret; - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); xe_gt_assert(gt, vfid != PFID); xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); - if (!pf_migration_supported(gt)) - return -ENOPKG; + return pf_save_vf_vram_mig_data(gt, vfid); +} - mutex_lock(pf_migration_mutex(gt)); - ret = pf_restore_vf_guc_state(gt, vfid); - mutex_unlock(pf_migration_mutex(gt)); +/** + * xe_gt_sriov_pf_migration_vram_restore() - Restore VF VRAM migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * @data: the &xe_sriov_packet containing migration data + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_vram_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); - return ret; + return pf_restore_vf_vram_mig_data(gt, vfid, data); } -#ifdef CONFIG_DEBUG_FS /** - * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state. + * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT. * @gt: the &xe_gt - * @vfid: the VF identifier - * @buf: the user space buffer to read to - * @count: the maximum number of bytes to read - * @pos: the current position in the buffer + * @vfid: the VF identifier (can't be 0) * * This function is for PF only. * - * This function reads up to @count bytes from the saved VF GuC state buffer - * at offset @pos into the user space address starting at @buf. 
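[Annotation, not part of the patch: pf_save_vf_vram_mig_data() above deliberately returns -EAGAIN while further VRAM chunks remain, persisting its progress in migration->save.vram_offset. The PF control worker that drives it is not in this hunk; the fragment below is only a hedged sketch of the calling convention this implies.]

/* Sketch only: drain all VRAM chunks for one VF, one call per chunk. */
static int pf_save_all_vram_chunks(struct xe_gt *gt, unsigned int vfid)
{
	int err;

	do {
		err = xe_gt_sriov_pf_migration_vram_save(gt, vfid);
	} while (err == -EAGAIN);	/* more chunks remain, call again */

	return err;			/* 0 once the final chunk has been queued */
}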
- * - * Return: the number of bytes read or a negative error code on failure. + * Return: total migration data size in bytes or a negative error code on failure. */ -ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid, - char __user *buf, size_t count, loff_t *pos) +ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid) { - struct xe_gt_sriov_state_snapshot *snapshot; - ssize_t ret; + ssize_t total = 0; + ssize_t size; xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); xe_gt_assert(gt, vfid != PFID); xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); - if (!pf_migration_supported(gt)) - return -ENOPKG; + size = pf_migration_guc_size(gt, vfid); + if (size < 0) + return size; + if (size > 0) + size += sizeof(struct xe_sriov_packet_hdr); + total += size; + + size = pf_migration_ggtt_size(gt, vfid); + if (size < 0) + return size; + if (size > 0) + size += sizeof(struct xe_sriov_packet_hdr); + total += size; + + size = pf_migration_mmio_size(gt, vfid); + if (size < 0) + return size; + if (size > 0) + size += sizeof(struct xe_sriov_packet_hdr); + total += size; + + size = pf_migration_vram_size(gt, vfid); + if (size < 0) + return size; + if (size > 0) + size += sizeof(struct xe_sriov_packet_hdr); + total += size; + + return total; +} - mutex_lock(pf_migration_mutex(gt)); - snapshot = pf_pick_vf_snapshot(gt, vfid); - if (snapshot->guc.size) - ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff, - snapshot->guc.size); - else - ret = -ENODATA; - mutex_unlock(pf_migration_mutex(gt)); +/** + * xe_gt_sriov_pf_migration_ring_empty() - Check if a migration ring is empty. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * Return: true if the ring is empty, otherwise false. + */ +bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid) +{ + return ptr_ring_empty(&pf_pick_gt_migration(gt, vfid)->ring); +} - return ret; +/** + * xe_gt_sriov_pf_migration_ring_full() - Check if a migration ring is full. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * Return: true if the ring is full, otherwise false. + */ +bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid) +{ + return ptr_ring_full(&pf_pick_gt_migration(gt, vfid)->ring); } /** - * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state. + * xe_gt_sriov_pf_migration_ring_free() - Consume and free all data in migration ring * @gt: the &xe_gt * @vfid: the VF identifier - * @buf: the user space buffer with GuC VF state - * @size: the size of GuC VF state (in bytes) + */ +void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + struct xe_sriov_packet *data; + + if (ptr_ring_empty(&migration->ring)) + return; + + xe_gt_sriov_notice(gt, "VF%u unprocessed migration data left in the ring!\n", vfid); + + while ((data = ptr_ring_consume(&migration->ring))) + xe_sriov_packet_free(data); +} + +static void pf_migration_save_data_todo(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_packet_type type) +{ + set_bit(type, &pf_pick_gt_migration(gt, vfid)->save.data_remaining); +} + +/** + * xe_gt_sriov_pf_migration_save_init() - Initialize per-GT migration related data. 
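[Annotation, not part of the patch: xe_gt_sriov_pf_migration_size() above adds, for each component (GuC, GGTT, MMIO, VRAM) with a non-zero payload, the payload size plus one packet header. A purely illustrative calculation with made-up payload sizes, to make that accounting concrete:]

/* Illustrative numbers only; real sizes come from the per-component helpers. */
static size_t example_stream_size(void)
{
	const size_t hdr = sizeof(struct xe_sriov_packet_hdr);
	size_t total = 0;

	total += SZ_8M + hdr;			/* GuC payload + packet header */
	total += SZ_64K + hdr;			/* GGTT payload + packet header */
	total += 24 * sizeof(u32) + hdr;	/* MMIO payload + packet header */
	total += SZ_1G + hdr;			/* VRAM payload + packet header */

	return total;	/* a component with zero payload contributes nothing */
}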
+ * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + */ +void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + + migration->save.data_remaining = 0; + migration->save.vram_offset = 0; + + xe_gt_assert(gt, pf_migration_guc_size(gt, vfid) > 0); + pf_migration_save_data_todo(gt, vfid, XE_SRIOV_PACKET_TYPE_GUC); + + if (pf_migration_ggtt_size(gt, vfid) > 0) + pf_migration_save_data_todo(gt, vfid, XE_SRIOV_PACKET_TYPE_GGTT); + + xe_gt_assert(gt, pf_migration_mmio_size(gt, vfid) > 0); + pf_migration_save_data_todo(gt, vfid, XE_SRIOV_PACKET_TYPE_MMIO); + + if (pf_migration_vram_size(gt, vfid) > 0) + pf_migration_save_data_todo(gt, vfid, XE_SRIOV_PACKET_TYPE_VRAM); +} + +/** + * xe_gt_sriov_pf_migration_save_data_pending() - Check if migration data type needs to be saved. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * @type: the &xe_sriov_packet_type of data to be checked * - * This function is for PF only. + * Return: true if the data needs saving, otherwise false. + */ +bool xe_gt_sriov_pf_migration_save_data_pending(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_packet_type type) +{ + return test_bit(type, &pf_pick_gt_migration(gt, vfid)->save.data_remaining); +} + +/** + * xe_gt_sriov_pf_migration_save_data_complete() - Complete migration data type save. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * @type: the &xe_sriov_packet_type to be marked as completed. + */ +void xe_gt_sriov_pf_migration_save_data_complete(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_packet_type type) +{ + clear_bit(type, &pf_pick_gt_migration(gt, vfid)->save.data_remaining); +} + +/** + * xe_gt_sriov_pf_migration_save_produce() - Add VF save data packet to migration ring. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @data: the &xe_sriov_packet * - * This function reads @size bytes of the VF GuC state stored at user space - * address @buf and writes it into a internal VF state buffer. + * Called by the save migration data producer (PF SR-IOV Control worker) when + * processing migration data. + * Wakes up the save migration data consumer (userspace), that is potentially + * waiting for data when the ring was empty. * - * Return: the number of bytes used or a negative error code on failure. + * Return: 0 on success or a negative error code on failure. 
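[Annotation, not part of the patch: save_init()/save_data_pending()/save_data_complete() above track the outstanding component types in a per-VF bitmap. The control worker that consumes that bitmap lives in the SR-IOV control code and is not shown here; the fragment below is a hypothetical sketch of one such step.]

/* Hypothetical worker step: save one pending component type and clear its bit. */
static int pf_try_save_guc_component(struct xe_gt *gt, unsigned int vfid)
{
	int err = 0;

	if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid, XE_SRIOV_PACKET_TYPE_GUC)) {
		err = xe_gt_sriov_pf_migration_guc_save(gt, vfid);
		if (!err)
			xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
								    XE_SRIOV_PACKET_TYPE_GUC);
	}

	return err;
}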
*/ -ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid, - const char __user *buf, size_t size) +int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) { - struct xe_gt_sriov_state_snapshot *snapshot; - loff_t pos = 0; - ssize_t ret; + int ret; - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); - xe_gt_assert(gt, vfid != PFID); - xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + ret = ptr_ring_produce(&pf_pick_gt_migration(gt, vfid)->ring, data); + if (ret) + return ret; - if (!pf_migration_supported(gt)) - return -ENOPKG; + wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid)); - mutex_lock(pf_migration_mutex(gt)); - snapshot = pf_pick_vf_snapshot(gt, vfid); - ret = pf_alloc_guc_state(gt, snapshot, size); - if (!ret) { - ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size); - if (ret < 0) - pf_free_guc_state(gt, snapshot); - else - pf_dump_guc_state(gt, snapshot); + return 0; +} + +/** + * xe_gt_sriov_pf_migration_restore_consume() - Get VF restore data packet from migration ring. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * Called by the restore migration data consumer (PF SR-IOV Control worker) when + * processing migration data. + * Wakes up the restore migration data producer (userspace), that is + * potentially waiting to add more data when the ring is full. + * + * Return: Pointer to &xe_sriov_packet on success, + * NULL if ring is empty. + */ +struct xe_sriov_packet * +xe_gt_sriov_pf_migration_restore_consume(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + struct wait_queue_head *wq = xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid); + struct xe_sriov_packet *data; + + data = ptr_ring_consume(&migration->ring); + if (data) + wake_up_all(wq); + + return data; +} + +static bool pf_restore_data_ready(struct xe_gt *gt, unsigned int vfid) +{ + if (xe_gt_sriov_pf_control_check_restore_failed(gt, vfid) || + !ptr_ring_full(&pf_pick_gt_migration(gt, vfid)->ring)) + return true; + + return false; +} + +/** + * xe_gt_sriov_pf_migration_restore_produce() - Add VF restore data packet to migration ring. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @data: the &xe_sriov_packet + * + * Called by the restore migration data producer (userspace) when processing + * migration data. + * If the ring is full, waits until there is space. + * Queues the restore migration data consumer (PF SR-IOV Control worker), that + * is potentially waiting for data when the ring was empty. + * + * Return: 0 on success or a negative error code on failure. 
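[Annotation, not part of the patch: the produce/consume helpers above pair a ptr_ring with a waitqueue, so the side blocked on an empty (or full) ring is woken as soon as the other side makes progress. A minimal, generic sketch of that pattern, not driver code:]

#include <linux/ptr_ring.h>
#include <linux/wait.h>

/* Producer side: ptr_ring_produce() returns -ENOSPC while the ring is full. */
static int demo_produce(struct ptr_ring *ring, struct wait_queue_head *wq, void *item)
{
	int err = ptr_ring_produce(ring, item);

	if (!err)
		wake_up_all(wq);	/* unblock a consumer waiting for data */
	return err;
}

/* Consumer side: ptr_ring_consume() returns NULL when the ring is empty. */
static void *demo_consume(struct ptr_ring *ring, struct wait_queue_head *wq)
{
	void *item = ptr_ring_consume(ring);

	if (item)
		wake_up_all(wq);	/* unblock a producer waiting for space */
	return item;
}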
+ */ +int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data) +{ + int ret; + + xe_gt_assert(gt, data->hdr.tile_id == gt->tile->id); + xe_gt_assert(gt, data->hdr.gt_id == gt->info.id); + + for (;;) { + if (xe_gt_sriov_pf_control_check_restore_failed(gt, vfid)) + return -EIO; + + ret = ptr_ring_produce(&pf_pick_gt_migration(gt, vfid)->ring, data); + if (!ret) + break; + + ret = wait_event_interruptible(*xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid), + pf_restore_data_ready(gt, vfid)); + if (ret) + return ret; } - mutex_unlock(pf_migration_mutex(gt)); - return ret; + return xe_gt_sriov_pf_control_process_restore_data(gt, vfid); } -#endif /* CONFIG_DEBUG_FS */ -static bool pf_check_migration_support(struct xe_gt *gt) +/** + * xe_gt_sriov_pf_migration_save_consume() - Get VF save data packet from migration ring. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * Called by the save migration data consumer (userspace) when + * processing migration data. + * Queues the save migration data producer (PF SR-IOV Control worker), that is + * potentially waiting to add more data when the ring is full. + * + * Return: Pointer to &xe_sriov_packet on success, + * NULL if ring is empty and there's no more data available, + * ERR_PTR(-EAGAIN) if the ring is empty, but data is still produced. + */ +struct xe_sriov_packet * +xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid) { - /* GuC 70.25 with save/restore v2 is required */ - xe_gt_assert(gt, GUC_FIRMWARE_VER(>->uc.guc) >= MAKE_GUC_VER(70, 25, 0)); + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + struct xe_sriov_packet *data; + int ret; + + data = ptr_ring_consume(&migration->ring); + if (data) { + ret = xe_gt_sriov_pf_control_process_save_data(gt, vfid); + if (ret) { + xe_sriov_packet_free(data); + return ERR_PTR(ret); + } + + return data; + } + + if (xe_gt_sriov_pf_control_check_save_data_done(gt, vfid)) + return NULL; + + if (xe_gt_sriov_pf_control_check_save_failed(gt, vfid)) + return ERR_PTR(-EIO); - /* XXX: for now this is for feature enabling only */ - return IS_ENABLED(CONFIG_DRM_XE_DEBUG); + return ERR_PTR(-EAGAIN); +} + +static void destroy_pf_packet(void *ptr) +{ + struct xe_sriov_packet *data = ptr; + + xe_sriov_packet_free(data); +} + +static void action_ring_cleanup(void *arg) +{ + struct ptr_ring *r = arg; + + ptr_ring_cleanup(r, destroy_pf_packet); } /** @@ -402,18 +1034,27 @@ static bool pf_check_migration_support(struct xe_gt *gt) int xe_gt_sriov_pf_migration_init(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + unsigned int n, totalvfs; int err; xe_gt_assert(gt, IS_SRIOV_PF(xe)); - gt->sriov.pf.migration.supported = pf_check_migration_support(gt); - if (!pf_migration_supported(gt)) return 0; - err = drmm_mutex_init(&xe->drm, >->sriov.pf.migration.snapshot_lock); - if (err) - return err; + totalvfs = xe_sriov_pf_get_totalvfs(xe); + for (n = 1; n <= totalvfs; n++) { + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, n); + + err = ptr_ring_init(&migration->ring, + XE_GT_SRIOV_PF_MIGRATION_RING_SIZE, GFP_KERNEL); + if (err) + return err; + + err = devm_add_action_or_reset(xe->drm.dev, action_ring_cleanup, &migration->ring); + if (err) + return err; + } return 0; } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index 09faeae00ddb..181207a637b9 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ 
b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -9,16 +9,46 @@ #include <linux/types.h> struct xe_gt; +struct xe_sriov_packet; +enum xe_sriov_packet_type; + +/* TODO: get this information by querying GuC in the future */ +#define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M int xe_gt_sriov_pf_migration_init(struct xe_gt *gt); -int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid); -int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid); - -#ifdef CONFIG_DEBUG_FS -ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid, - char __user *buf, size_t count, loff_t *pos); -ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid, - const char __user *buf, size_t count); -#endif +int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data); +int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data); +int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data); +int xe_gt_sriov_pf_migration_vram_save(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_vram_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data); + +ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid); + +bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid); +void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid); + +void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_migration_save_data_pending(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_packet_type type); +void xe_gt_sriov_pf_migration_save_data_complete(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_packet_type type); + +int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data); +struct xe_sriov_packet * +xe_gt_sriov_pf_migration_restore_consume(struct xe_gt *gt, unsigned int vfid); + +int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_packet *data); +struct xe_sriov_packet * +xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid); #endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h index 1f3110b6d44f..f50c64241e9c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h @@ -6,35 +6,23 @@ #ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_ #define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_ -#include <linux/mutex.h> -#include <linux/types.h> +#include <linux/ptr_ring.h> /** - * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data. + * struct xe_gt_sriov_migration_data - GT-level per-VF migration data. * * Used by the PF driver to maintain per-VF migration data. 
*/ -struct xe_gt_sriov_state_snapshot { - /** @guc: GuC VF state snapshot */ +struct xe_gt_sriov_migration_data { + /** @ring: queue containing VF save / restore migration data */ + struct ptr_ring ring; + /** @save: structure for currently processed save migration data */ struct { - /** @guc.buff: buffer with the VF state */ - u32 *buff; - /** @guc.size: size of the buffer (must be dwords aligned) */ - u32 size; - } guc; -}; - -/** - * struct xe_gt_sriov_pf_migration - GT-level data. - * - * Used by the PF driver to maintain non-VF specific per-GT data. - */ -struct xe_gt_sriov_pf_migration { - /** @supported: indicates whether the feature is supported */ - bool supported; - - /** @snapshot_lock: protects all VFs snapshots */ - struct mutex snapshot_lock; + /** @save.data_remaining: bitmap of migration types that need to be saved */ + unsigned long data_remaining; + /** @save.vram_offset: last saved offset within VRAM, used for chunked VRAM save */ + loff_t vram_offset; + } save; }; #endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c index 76dd9233ef9f..2eb21610e5a0 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c @@ -99,11 +99,30 @@ static const struct xe_reg ver_3000_runtime_regs[] = { HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; +static const struct xe_reg ver_35_runtime_regs[] = { + RPM_CONFIG0, /* _MMIO(0x0d00) */ + XEHP_FUSE4, /* _MMIO(0x9114) */ + MIRROR_FUSE3, /* _MMIO(0x9118) */ + MIRROR_L3BANK_ENABLE, /* _MMIO(0x9130) */ + XELP_EU_ENABLE, /* _MMIO(0x9134) */ + XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */ + GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ + XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */ + XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */ + XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */ + XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */ + XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */ + SERVICE_COPY_ENABLE, /* _MMIO(0x9170) */ +}; + static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count) { const struct xe_reg *regs; - if (GRAPHICS_VERx100(xe) >= 3000) { + if (GRAPHICS_VER(xe) >= 35) { + *count = ARRAY_SIZE(ver_35_runtime_regs); + regs = ver_35_runtime_regs; + } else if (GRAPHICS_VERx100(xe) >= 3000) { *count = ARRAY_SIZE(ver_3000_runtime_regs); regs = ver_3000_runtime_regs; } else if (GRAPHICS_VERx100(xe) >= 2000) { diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h index a64a6835ad65..667b8310478d 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h @@ -31,8 +31,8 @@ struct xe_gt_sriov_metadata { /** @version: negotiated VF/PF ABI version */ struct xe_gt_sriov_pf_service_version version; - /** @snapshot: snapshot of the VF state data */ - struct xe_gt_sriov_state_snapshot snapshot; + /** @migration: per-VF migration data. 
*/ + struct xe_gt_sriov_migration_data migration; }; /** @@ -58,7 +58,6 @@ struct xe_gt_sriov_pf { struct xe_gt_sriov_pf_service service; struct xe_gt_sriov_pf_control control; struct xe_gt_sriov_pf_policy policy; - struct xe_gt_sriov_pf_migration migration; struct xe_gt_sriov_spare_config spare; struct xe_gt_sriov_metadata *vfs; }; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h index 17624b16300a..d3457d608db8 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h @@ -7,10 +7,13 @@ #define _XE_GT_SRIOV_PRINTK_H_ #include "xe_gt_printk.h" -#include "xe_sriov_printk.h" +#include "xe_tile_sriov_printk.h" + +#define __XE_GT_SRIOV_PRINTK_FMT(_gt, _fmt, ...) \ + __XE_TILE_SRIOV_PRINTK_FMT((_gt)->tile, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__)) #define __xe_gt_sriov_printk(gt, _level, fmt, ...) \ - xe_gt_printk((gt), _level, "%s" fmt, xe_sriov_printk_prefix(gt_to_xe(gt)), ##__VA_ARGS__) + xe_sriov_##_level(gt_to_xe(gt), __XE_GT_SRIOV_PRINTK_FMT((gt), fmt, ##__VA_ARGS__)) #define xe_gt_sriov_err(_gt, _fmt, ...) \ __xe_gt_sriov_printk(_gt, err, _fmt, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index 0461d5513487..4c73a077d314 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -23,12 +23,19 @@ #include "xe_gt_sriov_vf.h" #include "xe_gt_sriov_vf_types.h" #include "xe_guc.h" +#include "xe_guc_ct.h" #include "xe_guc_hxg_helpers.h" #include "xe_guc_relay.h" +#include "xe_guc_submit.h" +#include "xe_irq.h" #include "xe_lrc.h" +#include "xe_memirq.h" #include "xe_mmio.h" #include "xe_sriov.h" #include "xe_sriov_vf.h" +#include "xe_sriov_vf_ccs.h" +#include "xe_tile_sriov_vf.h" +#include "xe_tlb_inval.h" #include "xe_uc_fw.h" #include "xe_wopcm.h" @@ -307,13 +314,13 @@ static int guc_action_vf_notify_resfix_done(struct xe_guc *guc) } /** - * xe_gt_sriov_vf_notify_resfix_done - Notify GuC about resource fixups apply completed. + * vf_notify_resfix_done - Notify GuC about resource fixups apply completed. * @gt: the &xe_gt struct instance linked to target GuC * * Returns: 0 if the operation completed successfully, or a negative error * code otherwise. 
*/ -int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt) +static int vf_notify_resfix_done(struct xe_gt *gt) { struct xe_guc *guc = >->uc.guc; int err; @@ -433,13 +440,17 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt) static int vf_get_ggtt_info(struct xe_gt *gt) { - struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; + struct xe_tile *tile = gt_to_tile(gt); + struct xe_ggtt *ggtt = tile->mem.ggtt; struct xe_guc *guc = >->uc.guc; - u64 start, size; + u64 start, size, ggtt_size; + s64 shift; int err; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + guard(mutex)(&ggtt->lock); + err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start); if (unlikely(err)) return err; @@ -448,28 +459,44 @@ static int vf_get_ggtt_info(struct xe_gt *gt) if (unlikely(err)) return err; - if (config->ggtt_size && config->ggtt_size != size) { + if (!size) + return -ENODATA; + + ggtt_size = xe_tile_sriov_vf_ggtt(tile); + if (ggtt_size && ggtt_size != size) { xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n", - size / SZ_1K, config->ggtt_size / SZ_1K); + size / SZ_1K, ggtt_size / SZ_1K); return -EREMCHG; } xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n", start, start + size - 1, size / SZ_1K); - config->ggtt_shift = start - (s64)config->ggtt_base; - config->ggtt_base = start; - config->ggtt_size = size; + shift = start - (s64)xe_tile_sriov_vf_ggtt_base(tile); + xe_tile_sriov_vf_ggtt_base_store(tile, start); + xe_tile_sriov_vf_ggtt_store(tile, size); - return config->ggtt_size ? 0 : -ENODATA; + if (shift && shift != start) { + xe_gt_sriov_info(gt, "Shifting GGTT base by %lld to 0x%016llx\n", + shift, start); + xe_tile_sriov_vf_fixup_ggtt_nodes_locked(gt_to_tile(gt), shift); + } + + if (xe_sriov_vf_migration_supported(gt_to_xe(gt))) { + WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false); + smp_wmb(); /* Ensure above write visible before wake */ + wake_up_all(>->sriov.vf.migration.wq); + } + + return 0; } static int vf_get_lmem_info(struct xe_gt *gt) { - struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; + struct xe_tile *tile = gt_to_tile(gt); struct xe_guc *guc = >->uc.guc; char size_str[10]; - u64 size; + u64 size, lmem_size; int err; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); @@ -478,18 +505,19 @@ static int vf_get_lmem_info(struct xe_gt *gt) if (unlikely(err)) return err; - if (config->lmem_size && config->lmem_size != size) { + lmem_size = xe_tile_sriov_vf_lmem(tile); + if (lmem_size && lmem_size != size) { xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n", - size / SZ_1M, config->lmem_size / SZ_1M); + size / SZ_1M, lmem_size / SZ_1M); return -EREMCHG; } string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str)); xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str); - config->lmem_size = size; + xe_tile_sriov_vf_lmem_store(tile, size); - return config->lmem_size ? 0 : -ENODATA; + return size ? 0 : -ENODATA; } static int vf_get_submission_cfg(struct xe_gt *gt) @@ -540,7 +568,9 @@ static void vf_cache_gmdid(struct xe_gt *gt) * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO. * @gt: the &xe_gt * - * This function is for VF use only. + * This function is for VF use only. This function may shift the GGTT and is + * performed under GGTT lock, making this step visible to all GTs that share a + * GGTT. * * Return: 0 on success or a negative error code on failure. 
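[Annotation, not part of the patch: vf_get_ggtt_info() above derives the post-migration GGTT shift as the signed difference between the newly reported base and the previously stored one, then relocates every GGTT node by that amount while holding the GGTT lock. A tiny worked example with made-up addresses:]

/* Made-up addresses, for illustration only. */
static s64 example_ggtt_shift(void)
{
	u64 old_base = 0x01000000;	/* base recorded before the VF was migrated */
	u64 new_base = 0x01800000;	/* base reported after migration */

	return new_base - (s64)old_base;	/* +8 MiB here; may also be negative */
}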
*/ @@ -586,75 +616,6 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt) return gt->sriov.vf.self_config.num_ctxs; } -/** - * xe_gt_sriov_vf_lmem - VF LMEM configuration. - * @gt: the &xe_gt - * - * This function is for VF use only. - * - * Return: size of the LMEM assigned to VF. - */ -u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt) -{ - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); - xe_gt_assert(gt, gt->sriov.vf.guc_version.major); - xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size); - - return gt->sriov.vf.self_config.lmem_size; -} - -/** - * xe_gt_sriov_vf_ggtt - VF GGTT configuration. - * @gt: the &xe_gt - * - * This function is for VF use only. - * - * Return: size of the GGTT assigned to VF. - */ -u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt) -{ - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); - xe_gt_assert(gt, gt->sriov.vf.guc_version.major); - xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size); - - return gt->sriov.vf.self_config.ggtt_size; -} - -/** - * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset. - * @gt: the &xe_gt - * - * This function is for VF use only. - * - * Return: base offset of the GGTT assigned to VF. - */ -u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt) -{ - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); - xe_gt_assert(gt, gt->sriov.vf.guc_version.major); - xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size); - - return gt->sriov.vf.self_config.ggtt_base; -} - -/** - * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration - * @gt: the &xe_gt struct instance - * - * This function is for VF use only. - * - * Return: The shift value; could be negative - */ -s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt) -{ - struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; - - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); - xe_gt_assert(gt, xe_gt_is_main_type(gt)); - - return config->ggtt_shift; -} - static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor) { u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = { @@ -755,7 +716,7 @@ failed: * xe_gt_sriov_vf_default_lrcs_hwsp_rebase - Update GGTT references in HWSP of default LRCs. * @gt: the &xe_gt struct instance */ -void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt) +static void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt) { struct xe_hw_engine *hwe; enum xe_hw_engine_id id; @@ -764,6 +725,31 @@ void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt) xe_default_lrc_update_memirq_regs_with_address(hwe); } +static void vf_start_migration_recovery(struct xe_gt *gt) +{ + bool started; + + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + + spin_lock(>->sriov.vf.migration.lock); + + if (!gt->sriov.vf.migration.recovery_queued || + !gt->sriov.vf.migration.recovery_teardown) { + gt->sriov.vf.migration.recovery_queued = true; + WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true); + WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, true); + smp_wmb(); /* Ensure above writes visible before wake */ + + xe_guc_ct_wake_waiters(>->uc.guc.ct); + + started = queue_work(gt->ordered_wq, >->sriov.vf.migration.worker); + xe_gt_sriov_info(gt, "VF migration recovery %s\n", started ? + "scheduled" : "already in progress"); + } + + spin_unlock(>->sriov.vf.migration.lock); +} + /** * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery, * or just mark that a GuC is ready for it. 
@@ -776,16 +762,15 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt) struct xe_device *xe = gt_to_xe(gt); xe_gt_assert(gt, IS_SRIOV_VF(xe)); + xe_gt_assert(gt, xe_gt_sriov_vf_recovery_pending(gt)); - set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags); - /* - * We need to be certain that if all flags were set, at least one - * thread will notice that and schedule the recovery. - */ - smp_mb__after_atomic(); + if (!xe_sriov_vf_migration_supported(xe)) { + xe_gt_sriov_err(gt, "migration not supported\n"); + return; + } xe_gt_sriov_info(gt, "ready for recovery after migration\n"); - xe_sriov_vf_start_migration_recovery(xe); + vf_start_migration_recovery(gt); } static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor) @@ -1040,22 +1025,25 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p) { struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; struct xe_device *xe = gt_to_xe(gt); + u64 lmem_size; char buf[10]; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); - drm_printf(p, "GGTT range:\t%#llx-%#llx\n", - config->ggtt_base, - config->ggtt_base + config->ggtt_size - 1); - - string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf)); - drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf); + if (xe_gt_is_main_type(gt)) { + u64 ggtt_size = xe_tile_sriov_vf_ggtt(gt_to_tile(gt)); + u64 ggtt_base = xe_tile_sriov_vf_ggtt_base(gt_to_tile(gt)); - drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift); + drm_printf(p, "GGTT range:\t%#llx-%#llx\n", + ggtt_base, ggtt_base + ggtt_size - 1); + string_get_size(ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf)); + drm_printf(p, "GGTT size:\t%llu (%s)\n", ggtt_size, buf); - if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) { - string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf)); - drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf); + if (IS_DGFX(xe)) { + lmem_size = xe_tile_sriov_vf_lmem(gt_to_tile(gt)); + string_get_size(lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf)); + drm_printf(p, "LMEM size:\t%llu (%s)\n", lmem_size, buf); + } } drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs); @@ -1118,3 +1106,272 @@ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p) drm_printf(p, "\thandshake:\t%u.%u\n", pf_version->major, pf_version->minor); } + +static bool vf_post_migration_shutdown(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + + /* + * On platforms where CCS must be restored by the primary GT, the media + * GT's VF post-migration recovery must run afterward. Detect this case + * and re-queue the media GT's restore work item if necessary. 
+ */ + if (xe->info.needs_shared_vf_gt_wq && xe_gt_is_media_type(gt)) { + struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt; + + if (xe_gt_sriov_vf_recovery_pending(primary_gt)) + return true; + } + + spin_lock_irq(>->sriov.vf.migration.lock); + gt->sriov.vf.migration.recovery_queued = false; + spin_unlock_irq(>->sriov.vf.migration.lock); + + xe_guc_ct_flush_and_stop(>->uc.guc.ct); + xe_guc_submit_pause(>->uc.guc); + xe_tlb_inval_reset(>->tlb_inval); + + return false; +} + +static size_t post_migration_scratch_size(struct xe_device *xe) +{ + return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE); +} + +static int vf_post_migration_fixups(struct xe_gt *gt) +{ + void *buf = gt->sriov.vf.migration.scratch; + int err; + + /* xe_gt_sriov_vf_query_config will fixup the GGTT addresses */ + err = xe_gt_sriov_vf_query_config(gt); + if (err) + return err; + + if (xe_gt_is_main_type(gt)) + xe_sriov_vf_ccs_rebase(gt_to_xe(gt)); + + xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt); + err = xe_guc_contexts_hwsp_rebase(>->uc.guc, buf); + if (err) + return err; + + return 0; +} + +static void vf_post_migration_rearm(struct xe_gt *gt) +{ + xe_guc_ct_restart(>->uc.guc.ct); + xe_guc_submit_unpause_prepare(>->uc.guc); +} + +static void vf_post_migration_kickstart(struct xe_gt *gt) +{ + xe_guc_submit_unpause(>->uc.guc); +} + +static void vf_post_migration_abort(struct xe_gt *gt) +{ + spin_lock_irq(>->sriov.vf.migration.lock); + WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false); + WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false); + spin_unlock_irq(>->sriov.vf.migration.lock); + + wake_up_all(>->sriov.vf.migration.wq); + + xe_guc_submit_pause_abort(>->uc.guc); +} + +static int vf_post_migration_notify_resfix_done(struct xe_gt *gt) +{ + bool skip_resfix = false; + + spin_lock_irq(>->sriov.vf.migration.lock); + if (gt->sriov.vf.migration.recovery_queued) { + skip_resfix = true; + xe_gt_sriov_dbg(gt, "another recovery imminent, resfix skipped\n"); + } else { + WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false); + } + spin_unlock_irq(>->sriov.vf.migration.lock); + + if (skip_resfix) + return -EAGAIN; + + /* + * Make sure interrupts on the new HW are properly set. The GuC IRQ + * must be working at this point, since the recovery did started, + * but the rest was not enabled using the procedure from spec. 
+ */ + xe_irq_resume(gt_to_xe(gt)); + + return vf_notify_resfix_done(gt); +} + +static void vf_post_migration_recovery(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + int err; + bool retry; + + xe_gt_sriov_dbg(gt, "migration recovery in progress\n"); + + retry = vf_post_migration_shutdown(gt); + if (retry) + goto queue; + + if (!xe_sriov_vf_migration_supported(xe)) { + xe_gt_sriov_err(gt, "migration is not supported\n"); + err = -ENOTRECOVERABLE; + goto fail; + } + + err = vf_post_migration_fixups(gt); + if (err) + goto fail; + + vf_post_migration_rearm(gt); + + err = vf_post_migration_notify_resfix_done(gt); + if (err && err != -EAGAIN) + goto fail; + + vf_post_migration_kickstart(gt); + + xe_gt_sriov_notice(gt, "migration recovery ended\n"); + return; +fail: + vf_post_migration_abort(gt); + xe_gt_sriov_err(gt, "migration recovery failed (%pe)\n", ERR_PTR(err)); + xe_device_declare_wedged(xe); + return; + +queue: + xe_gt_sriov_info(gt, "Re-queuing migration recovery\n"); + queue_work(gt->ordered_wq, >->sriov.vf.migration.worker); +} + +static void migration_worker_func(struct work_struct *w) +{ + struct xe_gt *gt = container_of(w, struct xe_gt, + sriov.vf.migration.worker); + + vf_post_migration_recovery(gt); +} + +static void vf_migration_fini(void *arg) +{ + struct xe_gt *gt = arg; + + spin_lock_irq(>->sriov.vf.migration.lock); + gt->sriov.vf.migration.recovery_teardown = true; + spin_unlock_irq(>->sriov.vf.migration.lock); + + cancel_work_sync(>->sriov.vf.migration.worker); +} + +/** + * xe_gt_sriov_vf_init_early() - GT VF init early + * @gt: the &xe_gt + * + * Return 0 on success, errno on failure + */ +int xe_gt_sriov_vf_init_early(struct xe_gt *gt) +{ + void *buf; + + if (!xe_sriov_vf_migration_supported(gt_to_xe(gt))) + return 0; + + buf = drmm_kmalloc(>_to_xe(gt)->drm, + post_migration_scratch_size(gt_to_xe(gt)), + GFP_KERNEL); + if (!buf) + return -ENOMEM; + + gt->sriov.vf.migration.scratch = buf; + spin_lock_init(>->sriov.vf.migration.lock); + INIT_WORK(>->sriov.vf.migration.worker, migration_worker_func); + init_waitqueue_head(>->sriov.vf.migration.wq); + + return 0; +} + +/** + * xe_gt_sriov_vf_init() - GT VF init + * @gt: the &xe_gt + * + * Return 0 on success, errno on failure + */ +int xe_gt_sriov_vf_init(struct xe_gt *gt) +{ + if (!xe_sriov_vf_migration_supported(gt_to_xe(gt))) + return 0; + + /* + * We want to tear down the VF post-migration early during driver + * unload; therefore, we add this finalization action later during + * driver load. + */ + return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, + vf_migration_fini, gt); +} + +/** + * xe_gt_sriov_vf_recovery_pending() - VF post migration recovery pending + * @gt: the &xe_gt + * + * The return value of this function must be immediately visible upon vCPU + * unhalt and must persist until RESFIX_DONE is issued. This guarantee is + * currently implemented only for platforms that support memirq. If non-memirq + * platforms begin to support VF migration, this function will need to be + * updated accordingly. 
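[Annotation, not part of the patch: vf_post_migration_recovery() above strings the helpers introduced by this patch together in a fixed order; a failure at any step, other than the -EAGAIN re-queue case, wedges the device. A condensed sketch of the happy path, error handling omitted:]

/* Condensed view of vf_post_migration_recovery(); return values ignored for brevity. */
static void recovery_happy_path_sketch(struct xe_gt *gt)
{
	vf_post_migration_shutdown(gt);			/* flush/stop CTB, pause submission, reset TLB inval */
	vf_post_migration_fixups(gt);			/* re-query config, rebase GGTT/LRC/CCS references */
	vf_post_migration_rearm(gt);			/* restart CTB, prepare submission unpause */
	vf_post_migration_notify_resfix_done(gt);	/* resume IRQs, send RESFIX_DONE to GuC */
	vf_post_migration_kickstart(gt);		/* unpause submission */
}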
+ * + * Return: True if VF post migration recovery is pending, False otherwise + */ +bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt) +{ + struct xe_memirq *memirq = >_to_tile(gt)->memirq; + + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + + /* early detection until recovery starts */ + if (xe_device_uses_memirq(gt_to_xe(gt)) && + xe_memirq_guc_sw_int_0_irq_pending(memirq, >->uc.guc)) + return true; + + return READ_ONCE(gt->sriov.vf.migration.recovery_inprogress); +} + +static bool vf_valid_ggtt(struct xe_gt *gt) +{ + struct xe_memirq *memirq = >_to_tile(gt)->memirq; + bool irq_pending = xe_device_uses_memirq(gt_to_xe(gt)) && + xe_memirq_guc_sw_int_0_irq_pending(memirq, >->uc.guc); + + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + + if (irq_pending || READ_ONCE(gt->sriov.vf.migration.ggtt_need_fixes)) + return false; + + return true; +} + +/** + * xe_gt_sriov_vf_wait_valid_ggtt() - VF wait for valid GGTT addresses + * @gt: the &xe_gt + */ +void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt) +{ + int ret; + + if (!IS_SRIOV_VF(gt_to_xe(gt)) || + !xe_sriov_vf_migration_supported(gt_to_xe(gt))) + return; + + ret = wait_event_interruptible_timeout(gt->sriov.vf.migration.wq, + vf_valid_ggtt(gt), + HZ * 5); + xe_gt_WARN_ON(gt, !ret); +} diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h index 0af1dc769fe0..af40276790fa 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h @@ -21,16 +21,15 @@ void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt, int xe_gt_sriov_vf_query_config(struct xe_gt *gt); int xe_gt_sriov_vf_connect(struct xe_gt *gt); int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt); -void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt); -int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt); void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt); +int xe_gt_sriov_vf_init_early(struct xe_gt *gt); +int xe_gt_sriov_vf_init(struct xe_gt *gt); +bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt); + u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt); u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt); u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt); -u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt); -u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt); -s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt); u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg); void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val); @@ -39,4 +38,6 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p); void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p); void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p); +void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt); + #endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h index 298dedf4b009..420b0e6089de 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h @@ -7,20 +7,14 @@ #define _XE_GT_SRIOV_VF_TYPES_H_ #include <linux/types.h> +#include <linux/wait.h> +#include <linux/workqueue.h> #include "xe_uc_fw_types.h" /** * struct xe_gt_sriov_vf_selfconfig - VF configuration data. */ struct xe_gt_sriov_vf_selfconfig { - /** @ggtt_base: assigned base offset of the GGTT region. */ - u64 ggtt_base; - /** @ggtt_size: assigned size of the GGTT region. */ - u64 ggtt_size; - /** @ggtt_shift: difference in ggtt_base on last migration */ - s64 ggtt_shift; - /** @lmem_size: assigned size of the LMEM. 
*/ - u64 lmem_size; /** @num_ctxs: assigned number of GuC submission context IDs. */ u16 num_ctxs; /** @num_dbs: assigned number of GuC doorbells IDs. */ @@ -47,6 +41,28 @@ struct xe_gt_sriov_vf_runtime { }; /** + * xe_gt_sriov_vf_migration - VF migration data. + */ +struct xe_gt_sriov_vf_migration { + /** @migration: VF migration recovery worker */ + struct work_struct worker; + /** @lock: Protects recovery_queued, teardown */ + spinlock_t lock; + /** @wq: wait queue for migration fixes */ + wait_queue_head_t wq; + /** @scratch: Scratch memory for VF recovery */ + void *scratch; + /** @recovery_teardown: VF post migration recovery is being torn down */ + bool recovery_teardown; + /** @recovery_queued: VF post migration recovery in queued */ + bool recovery_queued; + /** @recovery_inprogress: VF post migration recovery in progress */ + bool recovery_inprogress; + /** @ggtt_need_fixes: VF GGTT needs fixes */ + bool ggtt_need_fixes; +}; + +/** * struct xe_gt_sriov_vf - GT level VF virtualization data. */ struct xe_gt_sriov_vf { @@ -58,6 +74,8 @@ struct xe_gt_sriov_vf { struct xe_gt_sriov_vf_selfconfig self_config; /** @runtime: runtime data retrieved from the PF. */ struct xe_gt_sriov_vf_runtime runtime; + /** @migration: migration data for the VF. */ + struct xe_gt_sriov_vf_migration migration; }; #endif diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c index aa962c783cdf..82c5fbcdfbe3 100644 --- a/drivers/gpu/drm/xe/xe_gt_throttle.c +++ b/drivers/gpu/drm/xe/xe_gt_throttle.c @@ -8,221 +8,222 @@ #include <regs/xe_gt_regs.h> #include "xe_device.h" #include "xe_gt.h" -#include "xe_gt_printk.h" #include "xe_gt_sysfs.h" #include "xe_gt_throttle.h" #include "xe_mmio.h" +#include "xe_platform_types.h" #include "xe_pm.h" /** * DOC: Xe GT Throttle * - * Provides sysfs entries and other helpers for frequency throttle reasons in GT + * The GT frequency may be throttled by hardware/firmware for various reasons + * that are provided through attributes under the ``freq0/throttle/`` directory. + * Their availability depend on the platform and some may not be visible if that + * reason is not available. * - * device/gt#/freq0/throttle/status - Overall status - * device/gt#/freq0/throttle/reason_pl1 - Frequency throttle due to PL1 - * device/gt#/freq0/throttle/reason_pl2 - Frequency throttle due to PL2 - * device/gt#/freq0/throttle/reason_pl4 - Frequency throttle due to PL4, Iccmax etc. - * device/gt#/freq0/throttle/reason_thermal - Frequency throttle due to thermal - * device/gt#/freq0/throttle/reason_prochot - Frequency throttle due to prochot - * device/gt#/freq0/throttle/reason_ratl - Frequency throttle due to RATL - * device/gt#/freq0/throttle/reason_vr_thermalert - Frequency throttle due to VR THERMALERT - * device/gt#/freq0/throttle/reason_vr_tdc - Frequency throttle due to VR TDC + * The ``reasons`` attribute can be used by sysadmin to monitor all possible + * reasons for throttling and report them. It's preferred over monitoring + * ``status`` and then reading the reason from individual attributes since that + * is racy. If there's no throttling happening, "none" is returned. 
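[Annotation, not part of the patch: the ``reasons`` attribute documented above returns a space-separated list, or "none". A userspace sketch of how a monitor might poll it; the sysfs path is an example and varies with card/tile/GT numbering.]

#include <stdio.h>

int main(void)
{
	char line[256] = "none\n";
	FILE *f = fopen("/sys/class/drm/card0/device/tile0/gt0/freq0/throttle/reasons", "r");

	if (f) {
		if (!fgets(line, sizeof(line), f))	/* e.g. "reason_pl1 reason_prochot\n" */
			snprintf(line, sizeof(line), "none\n");
		fclose(f);
	}
	printf("throttle reasons: %s", line);
	return 0;
}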
+ * + * The following attributes are available on Crescent Island platform: + * + * - ``status``: Overall throttle status (0: no throttling, 1: throttling) + * - ``reasons``: Array of reasons causing throttling separated by space + * - ``reason_pl1``: package PL1 + * - ``reason_pl2``: package PL2 + * - ``reason_pl4``: package PL4 + * - ``reason_prochot``: prochot + * - ``reason_soc_thermal``: SoC thermal + * - ``reason_mem_thermal``: Memory thermal + * - ``reason_vr_thermal``: VR thermal + * - ``reason_iccmax``: ICCMAX + * - ``reason_ratl``: RATL thermal algorithm + * - ``reason_soc_avg_thermal``: SoC average temp + * - ``reason_fastvmode``: VR is hitting FastVMode + * - ``reason_psys_pl1``: PSYS PL1 + * - ``reason_psys_pl2``: PSYS PL2 + * - ``reason_p0_freq``: P0 frequency + * - ``reason_psys_crit``: PSYS critical + * + * Other platforms support the following reasons: + * + * - ``status``: Overall throttle status (0: no throttling, 1: throttling) + * - ``reasons``: Array of reasons causing throttling separated by space + * - ``reason_pl1``: package PL1 + * - ``reason_pl2``: package PL2 + * - ``reason_pl4``: package PL4, Iccmax etc. + * - ``reason_thermal``: thermal + * - ``reason_prochot``: prochot + * - ``reason_ratl``: RATL hermal algorithm + * - ``reason_vr_thermalert``: VR THERMALERT + * - ``reason_vr_tdc``: VR TDC */ -static struct xe_gt * -dev_to_gt(struct device *dev) -{ - return kobj_to_gt(dev->kobj.parent); -} - -u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt) -{ - u32 reg; - - xe_pm_runtime_get(gt_to_xe(gt)); - if (xe_gt_is_media_type(gt)) - reg = xe_mmio_read32(>->mmio, MTL_MEDIA_PERF_LIMIT_REASONS); - else - reg = xe_mmio_read32(>->mmio, GT0_PERF_LIMIT_REASONS); - xe_pm_runtime_put(gt_to_xe(gt)); - - return reg; -} - -static u32 read_status(struct xe_gt *gt) -{ - u32 status = xe_gt_throttle_get_limit_reasons(gt) & GT0_PERF_LIMIT_REASONS_MASK; - - xe_gt_dbg(gt, "throttle reasons: 0x%08x\n", status); - return status; -} +struct throttle_attribute { + struct kobj_attribute attr; + u32 mask; +}; -static u32 read_reason_pl1(struct xe_gt *gt) +static struct xe_gt *dev_to_gt(struct device *dev) { - u32 pl1 = xe_gt_throttle_get_limit_reasons(gt) & POWER_LIMIT_1_MASK; - - return pl1; + return kobj_to_gt(dev->kobj.parent); } -static u32 read_reason_pl2(struct xe_gt *gt) +static struct xe_gt *throttle_to_gt(struct kobject *kobj) { - u32 pl2 = xe_gt_throttle_get_limit_reasons(gt) & POWER_LIMIT_2_MASK; - - return pl2; + return dev_to_gt(kobj_to_dev(kobj)); } -static u32 read_reason_pl4(struct xe_gt *gt) +static struct throttle_attribute *kobj_attribute_to_throttle(struct kobj_attribute *attr) { - u32 pl4 = xe_gt_throttle_get_limit_reasons(gt) & POWER_LIMIT_4_MASK; - - return pl4; + return container_of(attr, struct throttle_attribute, attr); } -static u32 read_reason_thermal(struct xe_gt *gt) -{ - u32 thermal = xe_gt_throttle_get_limit_reasons(gt) & THERMAL_LIMIT_MASK; - - return thermal; -} - -static u32 read_reason_prochot(struct xe_gt *gt) +u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt) { - u32 prochot = xe_gt_throttle_get_limit_reasons(gt) & PROCHOT_MASK; - - return prochot; -} + struct xe_device *xe = gt_to_xe(gt); + struct xe_reg reg; + u32 val, mask; -static u32 read_reason_ratl(struct xe_gt *gt) -{ - u32 ratl = xe_gt_throttle_get_limit_reasons(gt) & RATL_MASK; + if (xe_gt_is_media_type(gt)) + reg = MTL_MEDIA_PERF_LIMIT_REASONS; + else + reg = GT0_PERF_LIMIT_REASONS; - return ratl; -} + if (xe->info.platform == XE_CRESCENTISLAND) + mask = CRI_PERF_LIMIT_REASONS_MASK; + 
else + mask = GT0_PERF_LIMIT_REASONS_MASK; -static u32 read_reason_vr_thermalert(struct xe_gt *gt) -{ - u32 thermalert = xe_gt_throttle_get_limit_reasons(gt) & VR_THERMALERT_MASK; + xe_pm_runtime_get(xe); + val = xe_mmio_read32(>->mmio, reg) & mask; + xe_pm_runtime_put(xe); - return thermalert; + return val; } -static u32 read_reason_vr_tdc(struct xe_gt *gt) +static bool is_throttled_by(struct xe_gt *gt, u32 mask) { - u32 tdc = xe_gt_throttle_get_limit_reasons(gt) & VR_TDC_MASK; - - return tdc; + return xe_gt_throttle_get_limit_reasons(gt) & mask; } -static ssize_t status_show(struct kobject *kobj, +static ssize_t reason_show(struct kobject *kobj, struct kobj_attribute *attr, char *buff) { - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool status = !!read_status(gt); + struct throttle_attribute *ta = kobj_attribute_to_throttle(attr); + struct xe_gt *gt = throttle_to_gt(kobj); - return sysfs_emit(buff, "%u\n", status); + return sysfs_emit(buff, "%u\n", is_throttled_by(gt, ta->mask)); } -static struct kobj_attribute attr_status = __ATTR_RO(status); -static ssize_t reason_pl1_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buff) -{ - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool pl1 = !!read_reason_pl1(gt); +static const struct attribute_group *get_platform_throttle_group(struct xe_device *xe); - return sysfs_emit(buff, "%u\n", pl1); -} -static struct kobj_attribute attr_reason_pl1 = __ATTR_RO(reason_pl1); - -static ssize_t reason_pl2_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buff) +static ssize_t reasons_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool pl2 = !!read_reason_pl2(gt); + struct xe_gt *gt = throttle_to_gt(kobj); + struct xe_device *xe = gt_to_xe(gt); + const struct attribute_group *group; + struct attribute **pother; + ssize_t ret = 0; + u32 reasons; - return sysfs_emit(buff, "%u\n", pl2); -} -static struct kobj_attribute attr_reason_pl2 = __ATTR_RO(reason_pl2); + reasons = xe_gt_throttle_get_limit_reasons(gt); + if (!reasons) + goto ret_none; -static ssize_t reason_pl4_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buff) -{ - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool pl4 = !!read_reason_pl4(gt); + group = get_platform_throttle_group(xe); + for (pother = group->attrs; *pother; pother++) { + struct kobj_attribute *kattr = container_of(*pother, struct kobj_attribute, attr); + struct throttle_attribute *other_ta = kobj_attribute_to_throttle(kattr); - return sysfs_emit(buff, "%u\n", pl4); -} -static struct kobj_attribute attr_reason_pl4 = __ATTR_RO(reason_pl4); + if (other_ta->mask != U32_MAX && reasons & other_ta->mask) + ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name); + } -static ssize_t reason_thermal_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buff) -{ - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool thermal = !!read_reason_thermal(gt); + if (drm_WARN_ONCE(&xe->drm, !ret, "Unknown reason: %#x\n", reasons)) + goto ret_none; - return sysfs_emit(buff, "%u\n", thermal); -} -static struct kobj_attribute attr_reason_thermal = __ATTR_RO(reason_thermal); + /* Drop extra space from last iteration above */ + ret--; + ret += sysfs_emit_at(buff, ret, "\n"); -static ssize_t reason_prochot_show(struct kobject *kobj, - struct kobj_attribute *attr, char 
*buff) -{ - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool prochot = !!read_reason_prochot(gt); + return ret; - return sysfs_emit(buff, "%u\n", prochot); +ret_none: + return sysfs_emit(buff, "none\n"); } -static struct kobj_attribute attr_reason_prochot = __ATTR_RO(reason_prochot); -static ssize_t reason_ratl_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buff) -{ - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool ratl = !!read_reason_ratl(gt); - - return sysfs_emit(buff, "%u\n", ratl); -} -static struct kobj_attribute attr_reason_ratl = __ATTR_RO(reason_ratl); - -static ssize_t reason_vr_thermalert_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buff) -{ - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool thermalert = !!read_reason_vr_thermalert(gt); - - return sysfs_emit(buff, "%u\n", thermalert); -} -static struct kobj_attribute attr_reason_vr_thermalert = __ATTR_RO(reason_vr_thermalert); - -static ssize_t reason_vr_tdc_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buff) -{ - struct device *dev = kobj_to_dev(kobj); - struct xe_gt *gt = dev_to_gt(dev); - bool tdc = !!read_reason_vr_tdc(gt); - - return sysfs_emit(buff, "%u\n", tdc); -} -static struct kobj_attribute attr_reason_vr_tdc = __ATTR_RO(reason_vr_tdc); +#define THROTTLE_ATTR_RO(name, _mask) \ + struct throttle_attribute attr_##name = { \ + .attr = __ATTR(name, 0444, reason_show, NULL), \ + .mask = _mask, \ + } + +#define THROTTLE_ATTR_RO_FUNC(name, _mask, _show) \ + struct throttle_attribute attr_##name = { \ + .attr = __ATTR(name, 0444, _show, NULL), \ + .mask = _mask, \ + } + +static THROTTLE_ATTR_RO_FUNC(reasons, 0, reasons_show); +static THROTTLE_ATTR_RO(status, U32_MAX); +static THROTTLE_ATTR_RO(reason_pl1, POWER_LIMIT_1_MASK); +static THROTTLE_ATTR_RO(reason_pl2, POWER_LIMIT_2_MASK); +static THROTTLE_ATTR_RO(reason_pl4, POWER_LIMIT_4_MASK); +static THROTTLE_ATTR_RO(reason_thermal, THERMAL_LIMIT_MASK); +static THROTTLE_ATTR_RO(reason_prochot, PROCHOT_MASK); +static THROTTLE_ATTR_RO(reason_ratl, RATL_MASK); +static THROTTLE_ATTR_RO(reason_vr_thermalert, VR_THERMALERT_MASK); +static THROTTLE_ATTR_RO(reason_vr_tdc, VR_TDC_MASK); static struct attribute *throttle_attrs[] = { - &attr_status.attr, - &attr_reason_pl1.attr, - &attr_reason_pl2.attr, - &attr_reason_pl4.attr, - &attr_reason_thermal.attr, - &attr_reason_prochot.attr, - &attr_reason_ratl.attr, - &attr_reason_vr_thermalert.attr, - &attr_reason_vr_tdc.attr, + &attr_reasons.attr.attr, + &attr_status.attr.attr, + &attr_reason_pl1.attr.attr, + &attr_reason_pl2.attr.attr, + &attr_reason_pl4.attr.attr, + &attr_reason_thermal.attr.attr, + &attr_reason_prochot.attr.attr, + &attr_reason_ratl.attr.attr, + &attr_reason_vr_thermalert.attr.attr, + &attr_reason_vr_tdc.attr.attr, + NULL +}; + +static THROTTLE_ATTR_RO(reason_vr_thermal, VR_THERMAL_MASK); +static THROTTLE_ATTR_RO(reason_soc_thermal, SOC_THERMAL_LIMIT_MASK); +static THROTTLE_ATTR_RO(reason_mem_thermal, MEM_THERMAL_MASK); +static THROTTLE_ATTR_RO(reason_iccmax, ICCMAX_MASK); +static THROTTLE_ATTR_RO(reason_soc_avg_thermal, SOC_AVG_THERMAL_MASK); +static THROTTLE_ATTR_RO(reason_fastvmode, FASTVMODE_MASK); +static THROTTLE_ATTR_RO(reason_psys_pl1, PSYS_PL1_MASK); +static THROTTLE_ATTR_RO(reason_psys_pl2, PSYS_PL2_MASK); +static THROTTLE_ATTR_RO(reason_p0_freq, P0_FREQ_MASK); +static THROTTLE_ATTR_RO(reason_psys_crit, PSYS_CRIT_MASK); + +static struct attribute 
*cri_throttle_attrs[] = { + /* Common */ + &attr_reasons.attr.attr, + &attr_status.attr.attr, + &attr_reason_pl1.attr.attr, + &attr_reason_pl2.attr.attr, + &attr_reason_pl4.attr.attr, + &attr_reason_prochot.attr.attr, + &attr_reason_ratl.attr.attr, + /* CRI */ + &attr_reason_vr_thermal.attr.attr, + &attr_reason_soc_thermal.attr.attr, + &attr_reason_mem_thermal.attr.attr, + &attr_reason_iccmax.attr.attr, + &attr_reason_soc_avg_thermal.attr.attr, + &attr_reason_fastvmode.attr.attr, + &attr_reason_psys_pl1.attr.attr, + &attr_reason_psys_pl2.attr.attr, + &attr_reason_p0_freq.attr.attr, + &attr_reason_psys_crit.attr.attr, NULL }; @@ -231,19 +232,37 @@ static const struct attribute_group throttle_group_attrs = { .attrs = throttle_attrs, }; +static const struct attribute_group cri_throttle_group_attrs = { + .name = "throttle", + .attrs = cri_throttle_attrs, +}; + +static const struct attribute_group *get_platform_throttle_group(struct xe_device *xe) +{ + switch (xe->info.platform) { + case XE_CRESCENTISLAND: + return &cri_throttle_group_attrs; + default: + return &throttle_group_attrs; + } +} + static void gt_throttle_sysfs_fini(void *arg) { struct xe_gt *gt = arg; + struct xe_device *xe = gt_to_xe(gt); + const struct attribute_group *group = get_platform_throttle_group(xe); - sysfs_remove_group(gt->freq, &throttle_group_attrs); + sysfs_remove_group(gt->freq, group); } int xe_gt_throttle_init(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + const struct attribute_group *group = get_platform_throttle_group(xe); int err; - err = sysfs_create_group(gt->freq, &throttle_group_attrs); + err = sysfs_create_group(gt->freq, group); if (err) return err; diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index 4e61c5e39bcb..bd5260221d8d 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -148,7 +148,11 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask) if (!xe_gt_topology_report_l3(gt)) return; - if (GRAPHICS_VER(xe) >= 30) { + if (GRAPHICS_VER(xe) >= 35) { + u32 fuse_val = xe_mmio_read32(mmio, MIRROR_L3BANK_ENABLE); + + bitmap_from_arr32(l3_bank_mask, &fuse_val, 32); + } else if (GRAPHICS_VER(xe) >= 30) { xe_l3_bank_mask_t per_node = {}; u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3); u32 mirror_l3bank_enable = xe_mmio_read32(mmio, MIRROR_L3BANK_ENABLE); @@ -269,8 +273,14 @@ static const char *eu_type_to_str(enum xe_gt_eu_type eu_type) return NULL; } -void -xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p) +/** + * xe_gt_topology_dump() - Dump GT topology into a drm printer. + * @gt: the &xe_gt + * @p: the &drm_printer + * + * Return: always 0. + */ +int xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p) { drm_printf(p, "dss mask (geometry): %*pb\n", XE_MAX_DSS_FUSE_BITS, gt->fuse_topo.g_dss_mask); @@ -285,6 +295,7 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p) if (xe_gt_topology_report_l3(gt)) drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS, gt->fuse_topo.l3_bank_mask); + return 0; } /* @@ -298,6 +309,13 @@ xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum) return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize); } +/* Used to obtain the index of the first L3 bank. 
*/ +unsigned int +xe_l3_bank_mask_ffs(const xe_l3_bank_mask_t mask) +{ + return find_first_bit(mask, XE_MAX_L3_BANK_MASK_BITS); +} + /** * xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant * @gt: GT to check diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h index 5e62f5949b7b..162d603c9b81 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.h +++ b/drivers/gpu/drm/xe/xe_gt_topology.h @@ -23,7 +23,7 @@ struct drm_printer; void xe_gt_topology_init(struct xe_gt *gt); -void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p); +int xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p); /** * xe_gt_topology_mask_last_dss() - Returns the index of the last DSS in a mask. @@ -40,6 +40,8 @@ xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask) unsigned int xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum); +unsigned int +xe_l3_bank_mask_ffs(const xe_l3_bank_mask_t mask); bool xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad); diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index 66158105aca5..0a728180b6fe 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -66,6 +66,7 @@ struct xe_mmio_range { */ enum xe_steering_type { L3BANK, + NODE, MSLICE, LNCF, DSS, @@ -73,6 +74,13 @@ enum xe_steering_type { SQIDI_PSMI, /* + * Although most GAM ranges must be steered to (0,0) and thus use the + * INSTANCE0 type farther down, some platforms have special rules + * for specific subtypes that require steering to (1,0) instead. + */ + GAM1, + + /* * On some platforms there are multiple types of MCR registers that * will always return a non-terminated value at instance (0, 0). We'll * lump those all into a single category to keep things simple. @@ -202,81 +210,16 @@ struct xe_gt { /** * @usm.bb_pool: Pool from which batchbuffers, for USM operations * (e.g. migrations, fixing page tables), are allocated. - * Dedicated pool needed so USM operations to not get blocked + * Dedicated pool needed so USM operations do not get blocked * behind any user operations which may have resulted in a * fault. */ struct xe_sa_manager *bb_pool; /** * @usm.reserved_bcs_instance: reserved BCS instance used for USM - * operations (e.g. mmigrations, fixing page tables) + * operations (e.g. migrations, fixing page tables) */ u16 reserved_bcs_instance; - /** @usm.pf_wq: page fault work queue, unbound, high priority */ - struct workqueue_struct *pf_wq; - /** @usm.acc_wq: access counter work queue, unbound, high priority */ - struct workqueue_struct *acc_wq; - /** - * @usm.pf_queue: Page fault queue used to sync faults so faults can - * be processed not under the GuC CT lock. The queue is sized so - * it can sync all possible faults (1 per physical engine). - * Multiple queues exists for page faults from different VMs are - * be processed in parallel. - */ - struct pf_queue { - /** @usm.pf_queue.gt: back pointer to GT */ - struct xe_gt *gt; - /** @usm.pf_queue.data: data in the page fault queue */ - u32 *data; - /** - * @usm.pf_queue.num_dw: number of DWORDS in the page - * fault queue. Dynamically calculated based on the number - * of compute resources available. - */ - u32 num_dw; - /** - * @usm.pf_queue.tail: tail pointer in DWs for page fault queue, - * moved by worker which processes faults (consumer). - */ - u16 tail; - /** - * @usm.pf_queue.head: head pointer in DWs for page fault queue, - * moved by G2H handler (producer). 
- */ - u16 head; - /** @usm.pf_queue.lock: protects page fault queue */ - spinlock_t lock; - /** @usm.pf_queue.worker: to process page faults */ - struct work_struct worker; -#define NUM_PF_QUEUE 4 - } pf_queue[NUM_PF_QUEUE]; - /** - * @usm.acc_queue: Same as page fault queue, cannot process access - * counters under CT lock. - */ - struct acc_queue { - /** @usm.acc_queue.gt: back pointer to GT */ - struct xe_gt *gt; -#define ACC_QUEUE_NUM_DW 128 - /** @usm.acc_queue.data: data in the page fault queue */ - u32 data[ACC_QUEUE_NUM_DW]; - /** - * @usm.acc_queue.tail: tail pointer in DWs for access counter queue, - * moved by worker which processes counters - * (consumer). - */ - u16 tail; - /** - * @usm.acc_queue.head: head pointer in DWs for access counter queue, - * moved by G2H handler (producer). - */ - u16 head; - /** @usm.acc_queue.lock: protects page fault queue */ - spinlock_t lock; - /** @usm.acc_queue.worker: to process access counters */ - struct work_struct worker; -#define NUM_ACC_QUEUE 4 - } acc_queue[NUM_ACC_QUEUE]; } usm; /** @ordered_wq: used to serialize GT resets and TDRs */ @@ -387,7 +330,7 @@ struct xe_gt { /** * @wa_active.oob_initialized: mark oob as initialized to help * detecting misuse of XE_GT_WA() - it can only be called on - * initialization after OOB WAs have being processed + * initialization after OOB WAs have been processed */ bool oob_initialized; } wa_active; diff --git a/drivers/gpu/drm/xe/xe_guard.h b/drivers/gpu/drm/xe/xe_guard.h new file mode 100644 index 000000000000..333f8e13b5a1 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guard.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_GUARD_H_ +#define _XE_GUARD_H_ + +#include <linux/spinlock.h> + +/** + * struct xe_guard - Simple logic to protect a feature. + * + * Implements simple semaphore-like logic that can be used to lockdown the + * feature unless it is already in use. Allows enabling of the otherwise + * incompatible features, where we can't follow the strict owner semantics + * required by the &rw_semaphore. + * + * NOTE! It shouldn't be used to protect a data, use &rw_semaphore instead. + */ +struct xe_guard { + /** + * @counter: implements simple exclusive/lockdown logic: + * if == 0 then guard/feature is idle/not in use, + * if < 0 then feature is active and can't be locked-down, + * if > 0 then feature is lockded-down and can't be activated. + */ + int counter; + + /** @name: the name of the guard (useful for debug) */ + const char *name; + + /** @owner: the info about the last owner of the guard (for debug) */ + void *owner; + + /** @lock: protects guard's data */ + spinlock_t lock; +}; + +/** + * xe_guard_init() - Initialize the guard. + * @guard: the &xe_guard to init + * @name: name of the guard + */ +static inline void xe_guard_init(struct xe_guard *guard, const char *name) +{ + spin_lock_init(&guard->lock); + guard->counter = 0; + guard->name = name; +} + +/** + * xe_guard_arm() - Arm the guard for the exclusive/lockdown mode. + * @guard: the &xe_guard to arm + * @lockdown: arm for lockdown(true) or exclusive(false) mode + * @who: optional owner info (for debug only) + * + * Multiple lockdown requests are allowed. + * Only single exclusive access can be granted. + * Will fail if the guard is already in exclusive mode. + * On success, must call the xe_guard_disarm() to release. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +static inline int xe_guard_arm(struct xe_guard *guard, bool lockdown, void *who) +{ + guard(spinlock)(&guard->lock); + + if (lockdown) { + if (guard->counter < 0) + return -EBUSY; + guard->counter++; + } else { + if (guard->counter > 0) + return -EPERM; + if (guard->counter < 0) + return -EUSERS; + guard->counter--; + } + + guard->owner = who; + return 0; +} + +/** + * xe_guard_disarm() - Disarm the guard from exclusive/lockdown mode. + * @guard: the &xe_guard to disarm + * @lockdown: disarm from lockdown(true) or exclusive(false) mode + * + * Return: true if successfully disarmed or false in case of mismatch. + */ +static inline bool xe_guard_disarm(struct xe_guard *guard, bool lockdown) +{ + guard(spinlock)(&guard->lock); + + if (lockdown) { + if (guard->counter <= 0) + return false; + guard->counter--; + } else { + if (guard->counter != -1) + return false; + guard->counter++; + } + return true; +} + +/** + * xe_guard_mode_str() - Convert guard mode into a string. + * @lockdown: flag used to select lockdown or exclusive mode + * + * Return: "lockdown" or "exclusive" string. + */ +static inline const char *xe_guard_mode_str(bool lockdown) +{ + return lockdown ? "lockdown" : "exclusive"; +} + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 00789844ea4d..a686b04879d6 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -5,6 +5,7 @@ #include "xe_guc.h" +#include <linux/iopoll.h> #include <drm/drm_managed.h> #include <generated/xe_wa_oob.h> @@ -23,6 +24,7 @@ #include "xe_gt_printk.h" #include "xe_gt_sriov_vf.h" #include "xe_gt_throttle.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_guc_ads.h" #include "xe_guc_buf.h" #include "xe_guc_capture.h" @@ -39,6 +41,7 @@ #include "xe_mmio.h" #include "xe_platform_types.h" #include "xe_sriov.h" +#include "xe_sriov_pf_migration.h" #include "xe_uc.h" #include "xe_uc_fw.h" #include "xe_wa.h" @@ -90,6 +93,9 @@ static u32 guc_ctl_feature_flags(struct xe_guc *guc) if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev))) flags |= GUC_CTL_ENABLE_PSMI_LOGGING; + if (xe_guc_using_main_gamctrl_queues(guc)) + flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES; + return flags; } @@ -817,6 +823,14 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc) return 0; } +static u32 guc_additional_cache_size(struct xe_device *xe) +{ + if (IS_SRIOV_PF(xe) && xe_sriov_pf_migration_supported(xe)) + return XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE; + else + return 0; /* Fallback to default size */ +} + /** * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load * @guc: The GuC object @@ -856,7 +870,8 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc) if (ret) return ret; - ret = xe_guc_buf_cache_init(&guc->buf); + ret = xe_guc_buf_cache_init_with_size(&guc->buf, + guc_additional_cache_size(guc_to_xe(guc))); if (ret) return ret; @@ -971,20 +986,93 @@ static int guc_xfer_rsa(struct xe_guc *guc) } /* - * Check a previously read GuC status register (GUC_STATUS) looking for - * known terminal states (either completion or failure) of either the - * microkernel status field or the boot ROM status field. Returns +1 for - * successful completion, -1 for failure and 0 for any intermediate state. + * Wait for the GuC to start up. + * + * Measurements indicate this should take no more than 20ms (assuming the GT + * clock is at maximum frequency). However, thermal throttling and other issues + * can prevent the clock hitting max and thus making the load take significantly + * longer. 
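The xe_guard helper introduced in xe_guard.h above is a counting arm/disarm scheme rather than a lock: lockdown requests stack, while exclusive use is single-owner. A minimal usage sketch under assumed names (feature_guard, feature_init(), feature_lockdown(), feature_unlock() and feature_use() are illustrative and not part of this patch; only the xe_guard_* calls come from the header above):

static struct xe_guard feature_guard;	/* illustrative guard instance */

static void feature_init(void)
{
	xe_guard_init(&feature_guard, "feature");
}

/* Debug path: keep the feature blocked while something else needs it quiesced. */
static int feature_lockdown(void *who)
{
	return xe_guard_arm(&feature_guard, true, who);	/* -EBUSY if feature is active */
}

static void feature_unlock(void)
{
	WARN_ON(!xe_guard_disarm(&feature_guard, true));
}

/* Normal path: single exclusive user of the feature. */
static int feature_use(void *who)
{
	int err = xe_guard_arm(&feature_guard, false, who);	/* -EPERM if locked down */

	if (err)
		return err;
	/* ... feature work ... */
	xe_guard_disarm(&feature_guard, false);
	return 0;
}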
Allow up to 3s as a safety margin in normal builds. For + * CONFIG_DRM_XE_DEBUG allow up to 10s to account for slower execution, issues + * in PCODE, driver, fan, etc. + * + * Keep checking the GUC_STATUS every 10ms with a debug message every 100 + * attempts as a "I'm slow, but alive" message. Regardless, if it takes more + * than 200ms, emit a warning. + */ + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +#define GUC_LOAD_TIMEOUT_SEC 20 +#else +#define GUC_LOAD_TIMEOUT_SEC 3 +#endif +#define GUC_LOAD_TIME_WARN_MSEC 200 + +static void print_load_status_err(struct xe_gt *gt, u32 status) +{ + struct xe_mmio *mmio = >->mmio; + u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status); + u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status); + + xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n", + REG_FIELD_GET(GS_MIA_IN_RESET, status), + bootrom, ukernel, + REG_FIELD_GET(GS_MIA_MASK, status), + REG_FIELD_GET(GS_AUTH_STATUS_MASK, status)); + + switch (bootrom) { + case XE_BOOTROM_STATUS_NO_KEY_FOUND: + xe_gt_err(gt, "invalid key requested, header = 0x%08X\n", + xe_mmio_read32(mmio, GUC_HEADER_INFO)); + break; + case XE_BOOTROM_STATUS_RSA_FAILED: + xe_gt_err(gt, "firmware signature verification failed\n"); + break; + case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE: + xe_gt_err(gt, "firmware production part check failure\n"); + break; + } + + switch (ukernel) { + case XE_GUC_LOAD_STATUS_HWCONFIG_START: + xe_gt_err(gt, "still extracting hwconfig table.\n"); + break; + case XE_GUC_LOAD_STATUS_EXCEPTION: + xe_gt_err(gt, "firmware exception. EIP: %#x\n", + xe_mmio_read32(mmio, SOFT_SCRATCH(13))); + break; + case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID: + xe_gt_err(gt, "illegal init/ADS data\n"); + break; + case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID: + xe_gt_err(gt, "illegal register in save/restore workaround list\n"); + break; + case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR: + xe_gt_err(gt, "illegal workaround KLV data\n"); + break; + case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG: + xe_gt_err(gt, "illegal feature flag specified\n"); + break; + } +} + +/* + * Check GUC_STATUS looking for known terminal states (either completion or + * failure) of either the microkernel status field or the boot ROM status field. + * + * Returns 1 for successful completion, -1 for failure and 0 for any + * intermediate state. 
*/ -static int guc_load_done(u32 status) +static int guc_load_done(struct xe_gt *gt, u32 *status, u32 *tries) { - u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, status); - u32 br_val = REG_FIELD_GET(GS_BOOTROM_MASK, status); + u32 ukernel, bootrom; + + *status = xe_mmio_read32(>->mmio, GUC_STATUS); + ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, *status); + bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, *status); - switch (uk_val) { + switch (ukernel) { case XE_GUC_LOAD_STATUS_READY: return 1; - case XE_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH: case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH: case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE: @@ -1000,7 +1088,7 @@ static int guc_load_done(u32 status) return -1; } - switch (br_val) { + switch (bootrom) { case XE_BOOTROM_STATUS_NO_KEY_FOUND: case XE_BOOTROM_STATUS_RSA_FAILED: case XE_BOOTROM_STATUS_PAVPC_FAILED: @@ -1014,165 +1102,58 @@ static int guc_load_done(u32 status) return -1; } - return 0; -} + if (++*tries >= 100) { + struct xe_guc_pc *guc_pc = >->uc.guc.pc; -static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc) -{ - u32 freq; - int ret = xe_guc_pc_get_cur_freq(guc_pc, &freq); + *tries = 0; + xe_gt_dbg(gt, "GuC load still in progress, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n", + xe_guc_pc_get_act_freq(guc_pc), + xe_guc_pc_get_cur_freq_fw(guc_pc), + *status, ukernel, bootrom); + } - return ret ? ret : freq; + return 0; } -/* - * Wait for the GuC to start up. - * - * Measurements indicate this should take no more than 20ms (assuming the GT - * clock is at maximum frequency). However, thermal throttling and other issues - * can prevent the clock hitting max and thus making the load take significantly - * longer. Allow up to 200ms as a safety margin for real world worst case situations. - * - * However, bugs anywhere from KMD to GuC to PCODE to fan failure in a CI farm can - * lead to even longer times. E.g. if the GT is clamped to minimum frequency then - * the load times can be in the seconds range. So the timeout is increased for debug - * builds to ensure that problems can be correctly analysed. For release builds, the - * timeout is kept short so that users don't wait forever to find out that there is a - * problem. In either case, if the load took longer than is reasonable even with some - * 'sensible' throttling, then flag a warning because something is not right. - * - * Note that there is a limit on how long an individual usleep_range() can wait for, - * hence longer waits require wrapping a shorter wait in a loop. - * - * Note that the only reason an end user should hit the shorter timeout is in case of - * extreme thermal throttling. And a system that is that hot during boot is probably - * dead anyway! - */ -#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) -#define GUC_LOAD_RETRY_LIMIT 20 -#else -#define GUC_LOAD_RETRY_LIMIT 3 -#endif -#define GUC_LOAD_TIME_WARN_MS 200 - static int guc_wait_ucode(struct xe_guc *guc) { struct xe_gt *gt = guc_to_gt(guc); - struct xe_mmio *mmio = >->mmio; struct xe_guc_pc *guc_pc = >->uc.guc.pc; - ktime_t before, after, delta; - int load_done; - u32 status = 0; - int count = 0; + u32 before_freq, act_freq, cur_freq; + u32 status = 0, tries = 0; + ktime_t before; u64 delta_ms; - u32 before_freq; + int ret; before_freq = xe_guc_pc_get_act_freq(guc_pc); before = ktime_get(); - /* - * Note, can't use any kind of timing information from the call to xe_mmio_wait. - * It could return a thousand intermediate stages at random times. 
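As described in the comment above, the rewritten guc_load_done() both classifies GUC_STATUS and emits a progress message every 100 polls, which at the 10 ms poll period used by guc_wait_ucode() below works out to roughly one debug line per second within the overall GUC_LOAD_TIMEOUT_SEC budget. The status decode itself is plain field extraction; a standalone sketch using only the GS_* masks already referenced in this hunk:

static void decode_guc_status(struct xe_gt *gt, u32 status)
{
	u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
	u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);

	/* Log the microkernel and boot ROM progress fields plus the reset bit. */
	xe_gt_dbg(gt, "GuC status %#x: ukernel=%#x bootrom=%#x in_reset=%d\n",
		  status, ukernel, bootrom,
		  REG_FIELD_GET(GS_MIA_IN_RESET, status));
}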
Instead, must - * manually track the total time taken and locally implement the timeout. - */ - do { - u32 last_status = status & (GS_UKERNEL_MASK | GS_BOOTROM_MASK); - int ret; - - /* - * Wait for any change (intermediate or terminal) in the status register. - * Note, the return value is a don't care. The only failure code is timeout - * but the timeouts need to be accumulated over all the intermediate partial - * timeouts rather than allowing a huge timeout each time. So basically, need - * to treat a timeout no different to a value change. - */ - ret = xe_mmio_wait32_not(mmio, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK, - last_status, 1000 * 1000, &status, false); - if (ret < 0) - count++; - after = ktime_get(); - delta = ktime_sub(after, before); - delta_ms = ktime_to_ms(delta); - - load_done = guc_load_done(status); - if (load_done != 0) - break; - if (delta_ms >= (GUC_LOAD_RETRY_LIMIT * 1000)) - break; + ret = poll_timeout_us(ret = guc_load_done(gt, &status, &tries), ret, + 10 * USEC_PER_MSEC, + GUC_LOAD_TIMEOUT_SEC * USEC_PER_SEC, false); - xe_gt_dbg(gt, "load still in progress, timeouts = %d, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n", - count, xe_guc_pc_get_act_freq(guc_pc), - guc_pc_get_cur_freq(guc_pc), status, - REG_FIELD_GET(GS_BOOTROM_MASK, status), - REG_FIELD_GET(GS_UKERNEL_MASK, status)); - } while (1); + delta_ms = ktime_to_ms(ktime_sub(ktime_get(), before)); + act_freq = xe_guc_pc_get_act_freq(guc_pc); + cur_freq = xe_guc_pc_get_cur_freq_fw(guc_pc); - if (load_done != 1) { - u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status); - u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status); - - xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz), done = %d\n", + if (ret) { + xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz)\n", status, delta_ms, xe_guc_pc_get_act_freq(guc_pc), - guc_pc_get_cur_freq(guc_pc), load_done); - xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n", - REG_FIELD_GET(GS_MIA_IN_RESET, status), - bootrom, ukernel, - REG_FIELD_GET(GS_MIA_MASK, status), - REG_FIELD_GET(GS_AUTH_STATUS_MASK, status)); - - switch (bootrom) { - case XE_BOOTROM_STATUS_NO_KEY_FOUND: - xe_gt_err(gt, "invalid key requested, header = 0x%08X\n", - xe_mmio_read32(mmio, GUC_HEADER_INFO)); - break; - - case XE_BOOTROM_STATUS_RSA_FAILED: - xe_gt_err(gt, "firmware signature verification failed\n"); - break; - - case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE: - xe_gt_err(gt, "firmware production part check failure\n"); - break; - } - - switch (ukernel) { - case XE_GUC_LOAD_STATUS_HWCONFIG_START: - xe_gt_err(gt, "still extracting hwconfig table.\n"); - break; - - case XE_GUC_LOAD_STATUS_EXCEPTION: - xe_gt_err(gt, "firmware exception. EIP: %#x\n", - xe_mmio_read32(mmio, SOFT_SCRATCH(13))); - break; - - case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID: - xe_gt_err(gt, "illegal init/ADS data\n"); - break; - - case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID: - xe_gt_err(gt, "illegal register in save/restore workaround list\n"); - break; - - case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR: - xe_gt_err(gt, "illegal workaround KLV data\n"); - break; - - case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG: - xe_gt_err(gt, "illegal feature flag specified\n"); - break; - } + xe_guc_pc_get_cur_freq_fw(guc_pc)); + print_load_status_err(gt, status); return -EPROTO; - } else if (delta_ms > GUC_LOAD_TIME_WARN_MS) { - xe_gt_warn(gt, "excessive init time: %lldms! 
[status = 0x%08X, timeouts = %d]\n", - delta_ms, status, count); - xe_gt_warn(gt, "excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n", - xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc), - before_freq, xe_gt_throttle_get_limit_reasons(gt)); + } + + if (delta_ms > GUC_LOAD_TIME_WARN_MSEC) { + xe_gt_warn(gt, "GuC load: excessive init time: %lldms! [status = 0x%08X]\n", + delta_ms, status); + xe_gt_warn(gt, "GuC load: excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n", + act_freq, cur_freq, before_freq, + xe_gt_throttle_get_limit_reasons(gt)); } else { - xe_gt_dbg(gt, "init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X, timeouts = %d\n", - delta_ms, xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc), - before_freq, status, count); + xe_gt_dbg(gt, "GuC load: init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X\n", + delta_ms, act_freq, cur_freq, before_freq, status); } return 0; @@ -1288,8 +1269,13 @@ int xe_guc_min_load_for_hwconfig(struct xe_guc *guc) int xe_guc_upload(struct xe_guc *guc) { + struct xe_gt *gt = guc_to_gt(guc); + xe_guc_ads_populate(&guc->ads); + if (xe_guc_using_main_gamctrl_queues(guc)) + xe_mmio_write32(>->mmio, MAIN_GAMCTRL_MODE, MAIN_GAMCTRL_QUEUE_SELECT); + return __xe_guc_upload(guc); } @@ -1472,7 +1458,7 @@ timeout: BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1); ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask, - 1000000, &header, false); + 2000000, &header, false); if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC)) @@ -1690,6 +1676,44 @@ void xe_guc_declare_wedged(struct xe_guc *guc) xe_guc_submit_wedge(guc); } +/** + * xe_guc_using_main_gamctrl_queues() - Detect which reporting queues to use. + * @guc: The GuC object + * + * For Xe3p and beyond, we want to program the hardware to use the + * "Main GAMCTRL queue" rather than the legacy queue before we upload + * the GuC firmware. This will allow the GuC to use a new set of + * registers for pagefault handling and avoid some unnecessary + * complications with MCR register range handling. + * + * Return: true if can use new main gamctrl queues. + */ +bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc) +{ + struct xe_gt *gt = guc_to_gt(guc); + + /* + * For Xe3p media gt (35), the GuC and the CS subunits may be still Xe3 + * that lacks the Main GAMCTRL support. Reserved bits from the GMD_ID + * inform the IP version of the subunits. 
+ */ + if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) == 35) { + u32 val = xe_mmio_read32(>->mmio, GMD_ID); + u32 subip = REG_FIELD_GET(GMD_ID_SUBIP_FLAG_MASK, val); + + if (!subip) + return true; + + xe_gt_WARN(gt, subip != 1, + "GMD_ID has unknown value in the SUBIP_FLAG field - 0x%x\n", + subip); + + return false; + } + + return GT_VER(gt) >= 35; +} + #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) #include "tests/xe_guc_g2g_test.c" #endif diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index 1cca05967e62..e2d4c5f44ae3 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -52,6 +52,7 @@ void xe_guc_stop_prepare(struct xe_guc *guc); void xe_guc_stop(struct xe_guc *guc); int xe_guc_start(struct xe_guc *guc); void xe_guc_declare_wedged(struct xe_guc *guc); +bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc); #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *payload, u32 len); diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index 58e0b0294a5b..bcb85a1bf26d 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -18,6 +18,7 @@ #include "xe_bo.h" #include "xe_gt.h" #include "xe_gt_ccs_mode.h" +#include "xe_gt_mcr.h" #include "xe_gt_printk.h" #include "xe_guc.h" #include "xe_guc_buf.h" @@ -30,7 +31,6 @@ #include "xe_platform_types.h" #include "xe_uc_fw.h" #include "xe_wa.h" -#include "xe_gt_mcr.h" /* Slack of a few additional entries per engine */ #define ADS_REGSET_EXTRA_MAX 8 @@ -820,16 +820,20 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads) static void guc_um_init_params(struct xe_guc_ads *ads) { u32 um_queue_offset = guc_ads_um_queues_offset(ads); + struct xe_guc *guc = ads_to_guc(ads); u64 base_dpa; u32 base_ggtt; + bool with_dpa; int i; + with_dpa = !xe_guc_using_main_gamctrl_queues(guc); + base_ggtt = xe_bo_ggtt_addr(ads->bo) + um_queue_offset; base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset; for (i = 0; i < GUC_UM_HW_QUEUE_MAX; ++i) { ads_blob_write(ads, um_init_params.queue_params[i].base_dpa, - base_dpa + (i * GUC_UM_QUEUE_SIZE)); + with_dpa ? (base_dpa + (i * GUC_UM_QUEUE_SIZE)) : 0); ads_blob_write(ads, um_init_params.queue_params[i].base_ggtt_address, base_ggtt + (i * GUC_UM_QUEUE_SIZE)); ads_blob_write(ads, um_init_params.queue_params[i].size_in_bytes, diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h index 70c132458ac3..48a8e092023f 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads_types.h +++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h @@ -14,7 +14,7 @@ struct xe_bo; * struct xe_guc_ads - GuC additional data structures (ADS) */ struct xe_guc_ads { - /** @bo: XE BO for GuC ads blob */ + /** @bo: Xe BO for GuC ads blob */ struct xe_bo *bo; /** @golden_lrc_size: golden LRC size */ size_t golden_lrc_size; diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c index 502ca3a4ee60..3ce442500130 100644 --- a/drivers/gpu/drm/xe/xe_guc_buf.c +++ b/drivers/gpu/drm/xe/xe_guc_buf.c @@ -13,6 +13,8 @@ #include "xe_guc_buf.h" #include "xe_sa.h" +#define XE_GUC_BUF_CACHE_DEFAULT_SIZE SZ_8K + static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache) { return container_of(cache, struct xe_guc, buf); @@ -23,21 +25,12 @@ static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache) return guc_to_gt(cache_to_guc(cache)); } -/** - * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache. 
- * @cache: the &xe_guc_buf_cache to initialize - * - * The Buffer Cache allows to obtain a reusable buffer that can be used to pass - * indirect H2G data to GuC without a need to create a ad-hoc allocation. - * - * Return: 0 on success or a negative error code on failure. - */ -int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) +static int guc_buf_cache_init(struct xe_guc_buf_cache *cache, u32 size) { struct xe_gt *gt = cache_to_gt(cache); struct xe_sa_manager *sam; - sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32)); + sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32)); if (IS_ERR(sam)) return PTR_ERR(sam); cache->sam = sam; @@ -49,6 +42,35 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) } /** + * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache. + * @cache: the &xe_guc_buf_cache to initialize + * + * The Buffer Cache allows to obtain a reusable buffer that can be used to pass + * data to GuC or read data from GuC without a need to create a ad-hoc allocation. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) +{ + return guc_buf_cache_init(cache, XE_GUC_BUF_CACHE_DEFAULT_SIZE); +} + +/** + * xe_guc_buf_cache_init_with_size() - Initialize the GuC Buffer Cache. + * @cache: the &xe_guc_buf_cache to initialize + * @size: size in bytes + * + * Like xe_guc_buf_cache_init(), except it allows the caller to make the cache + * buffer larger, allowing to accommodate larger objects. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size) +{ + return guc_buf_cache_init(cache, max(XE_GUC_BUF_CACHE_DEFAULT_SIZE, size)); +} + +/** * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports. * @cache: the &xe_guc_buf_cache to query * @@ -116,6 +138,19 @@ void xe_guc_buf_release(const struct xe_guc_buf buf) } /** + * xe_guc_buf_sync_read() - Copy the data from the GPU memory to the sub-allocation. + * @buf: the &xe_guc_buf to sync + * + * Return: a CPU pointer of the sub-allocation. + */ +void *xe_guc_buf_sync_read(const struct xe_guc_buf buf) +{ + xe_sa_bo_sync_read(buf.sa); + + return xe_sa_bo_cpu_addr(buf.sa); +} + +/** * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory. 
* @buf: the &xe_guc_buf to flush * diff --git a/drivers/gpu/drm/xe/xe_guc_buf.h b/drivers/gpu/drm/xe/xe_guc_buf.h index 0d67604d96bd..e3cca553fb00 100644 --- a/drivers/gpu/drm/xe/xe_guc_buf.h +++ b/drivers/gpu/drm/xe/xe_guc_buf.h @@ -12,6 +12,7 @@ #include "xe_guc_buf_types.h" int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache); +int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size); u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache); struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords); struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache, @@ -30,6 +31,7 @@ static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf) } void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf); +void *xe_guc_buf_sync_read(const struct xe_guc_buf buf); u64 xe_guc_buf_flush(const struct xe_guc_buf buf); u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf); u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size); diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c index 243dad3e2418..0c1fbe97b8bf 100644 --- a/drivers/gpu/drm/xe/xe_guc_capture.c +++ b/drivers/gpu/drm/xe/xe_guc_capture.c @@ -122,6 +122,7 @@ struct __guc_capture_parsed_output { { RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \ { RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \ { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \ + { RING_CURRENT_LRCA(0), REG_32BIT, 0, 0, 0, "CURRENT_LRCA"}, \ { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \ { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \ { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \ @@ -149,6 +150,9 @@ struct __guc_capture_parsed_output { { SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \ { SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"} +#define XE3P_BASE_ENGINE_INSTANCE \ + { RING_CSMQDEBUG(0), REG_32BIT, 0, 0, 0, "CSMQDEBUG"} + /* XE_LP Global */ static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = { COMMON_XELP_BASE_GLOBAL, @@ -195,6 +199,12 @@ static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = { COMMON_BASE_ENGINE_INSTANCE, }; +/* Render / Compute Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe3p_rc_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, + XE3P_BASE_ENGINE_INSTANCE, +}; + /* * Empty list to prevent warnings about unknown class/instance types * as not all class/instance types have entries on all platforms. 
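Putting the xe_guc_buf additions above together, the intended round trip is reserve, fill, flush, hand the GGTT address to an H2G action, sync_read back any GuC-written data, then release. A hedged sketch of that flow (send_and_read_back() and some_h2g_action() are hypothetical; only the xe_guc_buf_* calls come from this patch):

static int send_and_read_back(struct xe_guc *guc, const u32 *payload, u32 ndw)
{
	struct xe_guc_buf buf = xe_guc_buf_reserve(&guc->buf, ndw);
	u32 *cpu;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	cpu = xe_guc_buf_cpu_ptr(buf);
	memcpy(cpu, payload, ndw * sizeof(u32));

	/* Push the CPU copy to GPU-visible memory; flush returns the GGTT address. */
	some_h2g_action(guc, xe_guc_buf_flush(buf), ndw);

	/* Once the GuC has replied, pull back whatever it wrote in place. */
	cpu = xe_guc_buf_sync_read(buf);
	/* ... consume GuC-written dwords via cpu[] ... */

	xe_guc_buf_release(buf);
	return 0;
}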
@@ -245,6 +255,21 @@ static const struct __guc_mmio_reg_descr_group xe_hpg_lists[] = { {} }; + /* List of lists for Xe3p and beyond */ +static const struct __guc_mmio_reg_descr_group xe3p_lists[] = { + MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0), + MAKE_REGLIST(xe_hpg_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), + MAKE_REGLIST(xe3p_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO), + MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO), + MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), + MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER), + MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), + MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), + {} +}; static const char * const capture_list_type_names[] = { "Global", "Class", @@ -292,7 +317,9 @@ guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc, static const struct __guc_mmio_reg_descr_group * guc_capture_get_device_reglist(struct xe_device *xe) { - if (GRAPHICS_VERx100(xe) >= 1255) + if (GRAPHICS_VER(xe) >= 35) + return xe3p_lists; + else if (GRAPHICS_VERx100(xe) >= 1255) return xe_hpg_lists; else return xe_lp_lists; diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 18f6327bf552..4ac434ad216f 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -21,18 +21,18 @@ #include "xe_devcoredump.h" #include "xe_device.h" #include "xe_gt.h" -#include "xe_gt_pagefault.h" #include "xe_gt_printk.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_monitor.h" -#include "xe_gt_sriov_printk.h" #include "xe_guc.h" #include "xe_guc_log.h" +#include "xe_guc_pagefault.h" #include "xe_guc_relay.h" #include "xe_guc_submit.h" #include "xe_guc_tlb_inval.h" #include "xe_map.h" #include "xe_pm.h" +#include "xe_sriov_vf.h" #include "xe_trace_guc.h" static void receive_g2h(struct xe_guc_ct *ct); @@ -93,8 +93,6 @@ struct g2h_fence { bool done; }; -#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo))) - static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer) { memset(g2h_fence, 0, sizeof(*g2h_fence)); @@ -169,6 +167,7 @@ ct_to_xe(struct xe_guc_ct *ct) */ #define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K) +#define CTB_H2G_BUFFER_OFFSET (CTB_DESC_SIZE * 2) #define CTB_H2G_BUFFER_SIZE (SZ_4K) #define CTB_G2H_BUFFER_SIZE (SZ_128K) #define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2) @@ -192,7 +191,7 @@ long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct) static size_t guc_ct_size(void) { - return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + + return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE; } @@ -200,6 +199,9 @@ static void guc_ct_fini(struct drm_device *drm, void *arg) { struct xe_guc_ct *ct = arg; +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + cancel_work_sync(&ct->dead.worker); +#endif ct_exit_safe_mode(ct); destroy_workqueue(ct->g2h_wq); xa_destroy(&ct->fence_lookup); @@ -223,6 +225,12 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct) xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE)); + err = drmm_mutex_init(&xe->drm, 
&ct->lock); + if (err) + return err; + + primelockdep(ct); + ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM); if (!ct->g2h_wq) return -ENOMEM; @@ -234,16 +242,13 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct) #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) spin_lock_init(&ct->dead.lock); INIT_WORK(&ct->dead.worker, ct_dead_worker_func); +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC) + stack_depot_init(); +#endif #endif init_waitqueue_head(&ct->wq); init_waitqueue_head(&ct->g2h_fence_wq); - err = drmm_mutex_init(&xe->drm, &ct->lock); - if (err) - return err; - - primelockdep(ct); - err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct); if (err) return err; @@ -333,7 +338,7 @@ static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g, h2g->desc = *map; xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc)); - h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2); + h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET); } static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h, @@ -351,7 +356,7 @@ static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h, g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE); xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc)); - g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 + + g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE); } @@ -362,7 +367,7 @@ static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct) int err; desc_addr = xe_bo_ggtt_addr(ct->bo); - ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2; + ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET; size = ct->ctbs.h2g.info.size * sizeof(u32); err = xe_guc_self_cfg64(guc, @@ -389,7 +394,7 @@ static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct) int err; desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE; - ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 + + ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE; size = ct->ctbs.g2h.info.size * sizeof(u32); @@ -503,7 +508,7 @@ static void ct_exit_safe_mode(struct xe_guc_ct *ct) xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n"); } -int xe_guc_ct_enable(struct xe_guc_ct *ct) +static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register) { struct xe_device *xe = ct_to_xe(ct); struct xe_gt *gt = ct_to_gt(ct); @@ -511,21 +516,29 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) xe_gt_assert(gt, !xe_guc_ct_enabled(ct)); - xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo)); - guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); - guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); + if (needs_register) { + xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo)); + guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); + guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); - err = guc_ct_ctb_h2g_register(ct); - if (err) - goto err_out; + err = guc_ct_ctb_h2g_register(ct); + if (err) + goto err_out; - err = guc_ct_ctb_g2h_register(ct); - if (err) - goto err_out; + err = guc_ct_ctb_g2h_register(ct); + if (err) + goto err_out; - err = guc_ct_control_toggle(ct, true); - if (err) - goto err_out; + err = guc_ct_control_toggle(ct, true); + if (err) + goto err_out; + } else { + ct->ctbs.h2g.info.broken = false; + ct->ctbs.g2h.info.broken = false; + /* Skip everything in H2G buffer */ + xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0, + CTB_H2G_BUFFER_SIZE); + } guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED); @@ -557,6 +570,32 @@ 
err_out: return err; } +/** + * xe_guc_ct_restart() - Restart GuC CT + * @ct: the &xe_guc_ct + * + * Restart GuC CT to an empty state without issuing a CT register MMIO command. + * + * Return: 0 on success, or a negative errno on failure. + */ +int xe_guc_ct_restart(struct xe_guc_ct *ct) +{ + return __xe_guc_ct_start(ct, false); +} + +/** + * xe_guc_ct_enable() - Enable GuC CT + * @ct: the &xe_guc_ct + * + * Enable GuC CT to an empty state and issue a CT register MMIO command. + * + * Return: 0 on success, or a negative errno on failure. + */ +int xe_guc_ct_enable(struct xe_guc_ct *ct) +{ + return __xe_guc_ct_start(ct, true); +} + static void stop_g2h_handler(struct xe_guc_ct *ct) { cancel_work_sync(&ct->g2h_worker); @@ -577,6 +616,16 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct) } /** + * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G + * @ct: the &xe_guc_ct + */ +void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct) +{ + receive_g2h(ct); + xe_guc_ct_stop(ct); +} + +/** * xe_guc_ct_stop - Set GuC to stopped state * @ct: the &xe_guc_ct * @@ -739,6 +788,28 @@ static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence) return seqno; } +#define MAKE_ACTION(type, __action) \ +({ \ + FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) | \ + FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | \ + GUC_HXG_EVENT_MSG_0_DATA0, __action); \ +}) + +static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action) +{ + /* + * When resuming a VF, we can't reliably track whether context + * registration has completed in the GuC state machine. It is harmless + * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT + * is used. Additionally, if there is an H2G protocol issue on a VF, + * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely + * fail. 
+ */ + return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) && + (action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC || + action == XE_GUC_ACTION_REGISTER_CONTEXT); +} + #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, @@ -810,18 +881,14 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) | FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value); if (want_response) { - cmd[1] = - FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | - FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | - GUC_HXG_EVENT_MSG_0_DATA0, action[0]); + cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]); + } else if (vf_action_can_safely_fail(xe, action[0])) { + cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]); } else { fast_req_track(ct, ct_fence_value, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0])); - cmd[1] = - FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) | - FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | - GUC_HXG_EVENT_MSG_0_DATA0, action[0]); + cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]); } /* H2G header in cmd[1] replaces action[0] so: */ @@ -854,7 +921,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence) { - struct xe_gt *gt __maybe_unused = ct_to_gt(ct); + struct xe_gt *gt = ct_to_gt(ct); u16 seqno; int ret; @@ -875,7 +942,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, goto out; } - if (ct->state == XE_GUC_CT_STATE_STOPPED) { + if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) { ret = -ECANCELED; goto out; } @@ -930,22 +997,15 @@ static void kick_reset(struct xe_guc_ct *ct) static int dequeue_one_g2h(struct xe_guc_ct *ct); -static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, - u32 g2h_len, u32 num_g2h, - struct g2h_fence *g2h_fence) +/* + * wait before retry of sending h2g message + * Return: true if ready for retry, false if the wait timeouted + */ +static bool guc_ct_send_wait_for_retry(struct xe_guc_ct *ct, u32 len, + u32 g2h_len, struct g2h_fence *g2h_fence, + unsigned int *sleep_period_ms) { struct xe_device *xe = ct_to_xe(ct); - struct xe_gt *gt = ct_to_gt(ct); - unsigned int sleep_period_ms = 1; - int ret; - - xe_gt_assert(gt, !g2h_len || !g2h_fence); - lockdep_assert_held(&ct->lock); - xe_device_assert_mem_access(ct_to_xe(ct)); - -try_again: - ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, - g2h_fence); /* * We wait to try to restore credits for about 1 second before bailing. @@ -954,24 +1014,22 @@ try_again: * the case of G2H we process any G2H in the channel, hopefully freeing * credits as we consume the G2H messages. 
*/ - if (unlikely(ret == -EBUSY && - !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) { + if (!h2g_has_room(ct, len + GUC_CTB_HDR_LEN)) { struct guc_ctb *h2g = &ct->ctbs.h2g; - if (sleep_period_ms == 1024) - goto broken; + if (*sleep_period_ms == 1024) + return false; trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail, h2g->info.size, h2g->info.space, len + GUC_CTB_HDR_LEN); - msleep(sleep_period_ms); - sleep_period_ms <<= 1; - - goto try_again; - } else if (unlikely(ret == -EBUSY)) { + msleep(*sleep_period_ms); + *sleep_period_ms <<= 1; + } else { struct xe_device *xe = ct_to_xe(ct); struct guc_ctb *g2h = &ct->ctbs.g2h; + int ret; trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head, desc_read(xe, g2h, tail), @@ -985,7 +1043,7 @@ try_again: (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head) if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding || g2h_avail(ct), HZ)) - goto broken; + return false; #undef g2h_avail ret = dequeue_one_g2h(ct); @@ -993,9 +1051,32 @@ try_again: if (ret != -ECANCELED) xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)", ERR_PTR(ret)); - goto broken; + return false; } + } + return true; +} +static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, + u32 g2h_len, u32 num_g2h, + struct g2h_fence *g2h_fence) +{ + struct xe_gt *gt = ct_to_gt(ct); + unsigned int sleep_period_ms = 1; + int ret; + + xe_gt_assert(gt, !g2h_len || !g2h_fence); + lockdep_assert_held(&ct->lock); + xe_device_assert_mem_access(ct_to_xe(ct)); + +try_again: + ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, + g2h_fence); + + if (unlikely(ret == -EBUSY)) { + if (!guc_ct_send_wait_for_retry(ct, len, g2h_len, g2h_fence, + &sleep_period_ms)) + goto broken; goto try_again; } @@ -1337,6 +1418,10 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) fast_req_report(ct, fence); + /* FIXME: W/A race in the GuC, will get in firmware soon */ + if (xe_gt_recovery_pending(gt)) + return 0; + CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE); return -EPROTO; @@ -1466,10 +1551,6 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) case XE_GUC_ACTION_TLB_INVALIDATION_DONE: ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len); break; - case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY: - ret = xe_guc_access_counter_notify_handler(guc, payload, - adj_len); - break; case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF: ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len); break; @@ -1793,186 +1874,6 @@ static void g2h_worker_func(struct work_struct *w) receive_g2h(ct); } -static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds, - u32 size, u32 idx, s64 shift) -{ - u32 hi, lo; - u64 offset; - - lo = xe_map_rd_ring_u32(xe, cmds, idx, size); - hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size); - offset = make_u64(hi, lo); - offset += shift; - lo = lower_32_bits(offset); - hi = upper_32_bits(offset); - xe_map_wr_ring_u32(xe, cmds, idx, size, lo); - xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi); -} - -/* - * Shift any GGTT addresses within a single message left within CTB from - * before post-migration recovery. 
- * @ct: pointer to CT struct of the target GuC - * @cmds: iomap buffer containing CT messages - * @head: start of the target message within the buffer - * @len: length of the target message - * @size: size of the commands buffer - * @shift: the address shift to be added to each GGTT reference - * Return: true if the message was fixed or needed no fixups, false on failure - */ -static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct, - struct iosys_map *cmds, u32 head, - u32 len, u32 size, s64 shift) -{ - struct xe_gt *gt = ct_to_gt(ct); - struct xe_device *xe = ct_to_xe(ct); - u32 msg[GUC_HXG_MSG_MIN_LEN]; - u32 action, i, n; - - xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN); - - msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size); - action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]); - - xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action); - - switch (action) { - case XE_GUC_ACTION_REGISTER_CONTEXT: - if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN) - goto err_len; - xe_fixup_u64_in_cmds(xe, cmds, size, head + - XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER, - shift); - xe_fixup_u64_in_cmds(xe, cmds, size, head + - XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER, - shift); - xe_fixup_u64_in_cmds(xe, cmds, size, head + - XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift); - break; - case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC: - if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN) - goto err_len; - n = xe_map_rd_ring_u32(xe, cmds, head + - XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size); - if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n) - goto err_len; - xe_fixup_u64_in_cmds(xe, cmds, size, head + - XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER, - shift); - xe_fixup_u64_in_cmds(xe, cmds, size, head + - XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER, - shift); - for (i = 0; i < n; i++) - xe_fixup_u64_in_cmds(xe, cmds, size, head + - XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR - + 2 * i, shift); - break; - default: - break; - } - return true; - -err_len: - xe_gt_err(gt, "Skipped G2G %#x message fixups, unexpected length (%u)\n", action, len); - return false; -} - -/* - * Apply fixups to the next outgoing CT message within given CTB - * @ct: the &xe_guc_ct struct instance representing the target GuC - * @h2g: the &guc_ctb struct instance of the target buffer - * @shift: shift to be added to all GGTT addresses within the CTB - * @mhead: pointer to an integer storing message start position; the - * position is changed to next message before this function return - * @avail: size of the area available for parsing, that is length - * of all remaining messages stored within the CTB - * Return: size of the area available for parsing after one message - * has been parsed, that is length remaining from the updated mhead - */ -static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g, - s64 shift, u32 *mhead, s32 avail) -{ - struct xe_gt *gt = ct_to_gt(ct); - struct xe_device *xe = ct_to_xe(ct); - u32 msg[GUC_HXG_MSG_MIN_LEN]; - u32 size = h2g->info.size; - u32 head = *mhead; - u32 len; - - xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN); - - /* Read header */ - msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size); - len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN; - - if (unlikely(len > (u32)avail)) { - xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n", - avail, len); - return 0; - } - - head = (head + GUC_CTB_MSG_MIN_LEN) % size; - if 
(!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift)) - return 0; - *mhead = (head + msg_len_to_hxg_len(len)) % size; - - return avail - len; -} - -/** - * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages - * @ct: pointer to CT struct of the target GuC - * @ggtt_shift: shift to be added to all GGTT addresses within the CTB - * - * Messages in GuC to Host CTB are owned by GuC and any fixups in them - * are made by GuC. But content of the Host to GuC CTB is owned by the - * KMD, so fixups to GGTT references in any pending messages need to be - * applied here. - * This function updates GGTT offsets in payloads of pending H2G CTB - * messages (messages which were not consumed by GuC before the VF got - * paused). - */ -void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift) -{ - struct guc_ctb *h2g = &ct->ctbs.h2g; - struct xe_guc *guc = ct_to_guc(ct); - struct xe_gt *gt = guc_to_gt(guc); - u32 head, tail, size; - s32 avail; - - if (unlikely(h2g->info.broken)) - return; - - h2g->info.head = desc_read(ct_to_xe(ct), h2g, head); - head = h2g->info.head; - tail = READ_ONCE(h2g->info.tail); - size = h2g->info.size; - - if (unlikely(head > size)) - goto corrupted; - - if (unlikely(tail >= size)) - goto corrupted; - - avail = tail - head; - - /* beware of buffer wrap case */ - if (unlikely(avail < 0)) - avail += size; - xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size); - xe_gt_assert(gt, avail >= 0); - - while (avail > 0) - avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail); - - return; - -corrupted: - xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n", - head, tail, size); - h2g->info.broken = true; -} - static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic, bool want_ctb) { diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index cf41210ab30a..ca1ce2b3c354 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -15,8 +15,10 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct); int xe_guc_ct_init(struct xe_guc_ct *ct); int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct); int xe_guc_ct_enable(struct xe_guc_ct *ct); +int xe_guc_ct_restart(struct xe_guc_ct *ct); void xe_guc_ct_disable(struct xe_guc_ct *ct); void xe_guc_ct_stop(struct xe_guc_ct *ct); +void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct); void xe_guc_ct_fast_path(struct xe_guc_ct *ct); struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct); @@ -24,8 +26,6 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot); void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb); -void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift); - static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct) { return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED; @@ -74,4 +74,13 @@ xe_guc_ct_send_block_no_fail(struct xe_guc_ct *ct, const u32 *action, u32 len) long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct); +/** + * xe_guc_ct_wake_waiters() - GuC CT wake up waiters + * @ct: GuC CT object + */ +static inline void xe_guc_ct_wake_waiters(struct xe_guc_ct *ct) +{ + wake_up_all(&ct->wq); +} + #endif diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h index 8b03b50313d9..09d7ff1ef42a 100644 --- 
a/drivers/gpu/drm/xe/xe_guc_ct_types.h +++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h @@ -126,7 +126,7 @@ struct xe_fast_req_fence { * for the H2G and G2H requests sent and received through the buffers. */ struct xe_guc_ct { - /** @bo: XE BO for CT */ + /** @bo: Xe BO for CT */ struct xe_bo *bo; /** @lock: protects everything in CT layer */ struct mutex lock; diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h index c30c0e3ccbbb..a3b034e4b205 100644 --- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h @@ -51,6 +51,21 @@ struct xe_guc_exec_queue { wait_queue_head_t suspend_wait; /** @suspend_pending: a suspend of the exec_queue is pending */ bool suspend_pending; + /** + * @needs_cleanup: Needs a cleanup message during VF post migration + * recovery. + */ + bool needs_cleanup; + /** + * @needs_suspend: Needs a suspend message during VF post migration + * recovery. + */ + bool needs_suspend; + /** + * @needs_resume: Needs a resume message during VF post migration + * recovery. + */ + bool needs_resume; }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h index 50c4c2406132..c90dd266e9cf 100644 --- a/drivers/gpu/drm/xe/xe_guc_fwif.h +++ b/drivers/gpu/drm/xe/xe_guc_fwif.h @@ -113,6 +113,7 @@ struct guc_update_exec_queue_policy { #define GUC_CTL_ENABLE_SLPC BIT(2) #define GUC_CTL_ENABLE_LITE_RESTORE BIT(4) #define GUC_CTL_ENABLE_PSMI_LOGGING BIT(7) +#define GUC_CTL_MAIN_GAMCTRL_QUEUES BIT(9) #define GUC_CTL_DISABLE_SCHEDULER BIT(14) #define GUC_CTL_DEBUG 3 diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h index b3d5c72ac752..02851b924aa4 100644 --- a/drivers/gpu/drm/xe/xe_guc_log_types.h +++ b/drivers/gpu/drm/xe/xe_guc_log_types.h @@ -44,7 +44,7 @@ struct xe_guc_log_snapshot { struct xe_guc_log { /** @level: GuC log level */ u32 level; - /** @bo: XE BO for GuC log */ + /** @bo: Xe BO for GuC log */ struct xe_bo *bo; /** @stats: logging related stats */ struct { diff --git a/drivers/gpu/drm/xe/xe_guc_pagefault.c b/drivers/gpu/drm/xe/xe_guc_pagefault.c new file mode 100644 index 000000000000..719a18187a31 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_pagefault.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include "abi/guc_actions_abi.h" +#include "xe_guc.h" +#include "xe_guc_ct.h" +#include "xe_guc_pagefault.h" +#include "xe_pagefault.h" + +static void guc_ack_fault(struct xe_pagefault *pf, int err) +{ + u32 vfid = FIELD_GET(PFD_VFID, pf->producer.msg[2]); + u32 engine_instance = FIELD_GET(PFD_ENG_INSTANCE, pf->producer.msg[0]); + u32 engine_class = FIELD_GET(PFD_ENG_CLASS, pf->producer.msg[0]); + u32 pdata = FIELD_GET(PFD_PDATA_LO, pf->producer.msg[0]) | + (FIELD_GET(PFD_PDATA_HI, pf->producer.msg[1]) << + PFD_PDATA_HI_SHIFT); + u32 action[] = { + XE_GUC_ACTION_PAGE_FAULT_RES_DESC, + + FIELD_PREP(PFR_VALID, 1) | + FIELD_PREP(PFR_SUCCESS, !!err) | + FIELD_PREP(PFR_REPLY, PFR_ACCESS) | + FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) | + FIELD_PREP(PFR_ASID, pf->consumer.asid), + + FIELD_PREP(PFR_VFID, vfid) | + FIELD_PREP(PFR_ENG_INSTANCE, engine_instance) | + FIELD_PREP(PFR_ENG_CLASS, engine_class) | + FIELD_PREP(PFR_PDATA, pdata), + }; + struct xe_guc *guc = pf->producer.private; + + xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0); +} + +static const struct xe_pagefault_ops guc_pagefault_ops = { + .ack_fault = guc_ack_fault, +}; + +/** + * 
xe_guc_pagefault_handler() - G2H page fault handler + * @guc: GuC object + * @msg: G2H message + * @len: Length of G2H message + * + * Parse GuC to host (G2H) message into a struct xe_pagefault and forward onto + * the Xe page fault layer. + * + * Return: 0 on success, errno on failure + */ +int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) +{ + struct xe_pagefault pf; + int i; + +#define GUC_PF_MSG_LEN_DW \ + (sizeof(struct xe_guc_pagefault_desc) / sizeof(u32)) + + BUILD_BUG_ON(GUC_PF_MSG_LEN_DW > XE_PAGEFAULT_PRODUCER_MSG_LEN_DW); + + if (len != GUC_PF_MSG_LEN_DW) + return -EPROTO; + + pf.gt = guc_to_gt(guc); + + /* + * XXX: These values happen to match the enum in xe_pagefault_types.h. + * If that changes, we’ll need to remap them here. + */ + pf.consumer.page_addr = ((u64)FIELD_GET(PFD_VIRTUAL_ADDR_HI, msg[3]) + << PFD_VIRTUAL_ADDR_HI_SHIFT) | + (FIELD_GET(PFD_VIRTUAL_ADDR_LO, msg[2]) << + PFD_VIRTUAL_ADDR_LO_SHIFT); + pf.consumer.asid = FIELD_GET(PFD_ASID, msg[1]); + pf.consumer.access_type = FIELD_GET(PFD_ACCESS_TYPE, msg[2]); + pf.consumer.fault_type = FIELD_GET(PFD_FAULT_TYPE, msg[2]); + if (FIELD_GET(XE2_PFD_TRVA_FAULT, msg[0])) + pf.consumer.fault_level = XE_PAGEFAULT_LEVEL_NACK; + else + pf.consumer.fault_level = FIELD_GET(PFD_FAULT_LEVEL, msg[0]); + pf.consumer.engine_class = FIELD_GET(PFD_ENG_CLASS, msg[0]); + pf.consumer.engine_instance = FIELD_GET(PFD_ENG_INSTANCE, msg[0]); + + pf.producer.private = guc; + pf.producer.ops = &guc_pagefault_ops; + for (i = 0; i < GUC_PF_MSG_LEN_DW; ++i) + pf.producer.msg[i] = msg[i]; + +#undef GUC_PF_MSG_LEN_DW + + return xe_pagefault_handler(guc_to_xe(guc), &pf); +} diff --git a/drivers/gpu/drm/xe/xe_guc_pagefault.h b/drivers/gpu/drm/xe/xe_guc_pagefault.h new file mode 100644 index 000000000000..3bd599e7207c --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_pagefault.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_GUC_PAGEFAULT_H_ +#define _XE_GUC_PAGEFAULT_H_ + +#include <linux/types.h> + +struct xe_guc; + +int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len); + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 53fdf59524c4..951a49fb1d3e 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -7,12 +7,14 @@ #include <linux/cleanup.h> #include <linux/delay.h> +#include <linux/iopoll.h> #include <linux/jiffies.h> #include <linux/ktime.h> #include <linux/wait_bit.h> #include <drm/drm_managed.h> #include <drm/drm_print.h> +#include <generated/xe_device_wa_oob.h> #include <generated/xe_wa_oob.h> #include "abi/guc_actions_slpc_abi.h" @@ -130,26 +132,16 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc) FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count)) static int wait_for_pc_state(struct xe_guc_pc *pc, - enum slpc_global_state state, + enum slpc_global_state target_state, int timeout_ms) { - int timeout_us = 1000 * timeout_ms; - int slept, wait = 10; + enum slpc_global_state state; xe_device_assert_mem_access(pc_to_xe(pc)); - for (slept = 0; slept < timeout_us;) { - if (slpc_shared_data_read(pc, header.global_state) == state) - return 0; - - usleep_range(wait, wait << 1); - slept += wait; - wait <<= 1; - if (slept + wait > timeout_us) - wait = timeout_us - slept; - } - - return -ETIMEDOUT; + return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state), + state == target_state, + 20, timeout_ms * USEC_PER_MSEC, false); } static int 
wait_for_flush_complete(struct xe_guc_pc *pc) @@ -164,24 +156,15 @@ static int wait_for_flush_complete(struct xe_guc_pc *pc) return 0; } -static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq) +static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit) { - int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC; - int slept, wait = 10; - - for (slept = 0; slept < timeout_us;) { - if (xe_guc_pc_get_act_freq(pc) <= freq) - return 0; - - usleep_range(wait, wait << 1); - slept += wait; - wait <<= 1; - if (slept + wait > timeout_us) - wait = timeout_us - slept; - } + u32 freq; - return -ETIMEDOUT; + return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc), + freq <= max_limit, + 20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false); } + static int pc_action_reset(struct xe_guc_pc *pc) { struct xe_guc_ct *ct = pc_to_ct(pc); @@ -348,7 +331,7 @@ static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq) * Our goal is to have the admin choices respected. */ pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY, - freq < pc->rpe_freq); + freq < xe_guc_pc_get_rpe_freq(pc)); return pc_action_set_param(pc, SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, @@ -380,7 +363,7 @@ static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) freq); } -static void mtl_update_rpa_value(struct xe_guc_pc *pc) +static u32 mtl_get_rpa_freq(struct xe_guc_pc *pc) { struct xe_gt *gt = pc_to_gt(pc); u32 reg; @@ -390,10 +373,10 @@ static void mtl_update_rpa_value(struct xe_guc_pc *pc) else reg = xe_mmio_read32(>->mmio, MTL_GT_RPA_FREQUENCY); - pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg)); + return decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg)); } -static void mtl_update_rpe_value(struct xe_guc_pc *pc) +static u32 mtl_get_rpe_freq(struct xe_guc_pc *pc) { struct xe_gt *gt = pc_to_gt(pc); u32 reg; @@ -403,68 +386,56 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc) else reg = xe_mmio_read32(>->mmio, MTL_GT_RPE_FREQUENCY); - pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg)); + return decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg)); } -static void tgl_update_rpa_value(struct xe_guc_pc *pc) +static u32 pvc_get_rpa_freq(struct xe_guc_pc *pc) { - struct xe_gt *gt = pc_to_gt(pc); - struct xe_device *xe = gt_to_xe(gt); - u32 reg; - /* * For PVC we still need to use fused RP0 as the approximation for RPa * For other platforms than PVC we get the resolved RPa directly from * PCODE at a different register */ - if (xe->info.platform == XE_PVC) { - reg = xe_mmio_read32(>->mmio, PVC_RP_STATE_CAP); - pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER; - } else { - reg = xe_mmio_read32(>->mmio, FREQ_INFO_REC); - pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER; - } + + struct xe_gt *gt = pc_to_gt(pc); + u32 reg; + + reg = xe_mmio_read32(>->mmio, PVC_RP_STATE_CAP); + return REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER; } -static void tgl_update_rpe_value(struct xe_guc_pc *pc) +static u32 tgl_get_rpa_freq(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + u32 reg; + + reg = xe_mmio_read32(>->mmio, FREQ_INFO_REC); + return REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER; +} + +static u32 pvc_get_rpe_freq(struct xe_guc_pc *pc) { struct xe_gt *gt = pc_to_gt(pc); - struct xe_device *xe = gt_to_xe(gt); u32 reg; /* * For PVC we still need to use fused RP1 as the approximation for RPe - * For other platforms than PVC we get the resolved RPe directly from - * PCODE at a different register */ - if (xe->info.platform == 
XE_PVC) { - reg = xe_mmio_read32(>->mmio, PVC_RP_STATE_CAP); - pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER; - } else { - reg = xe_mmio_read32(>->mmio, FREQ_INFO_REC); - pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER; - } + reg = xe_mmio_read32(>->mmio, PVC_RP_STATE_CAP); + return REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER; } -static void pc_update_rp_values(struct xe_guc_pc *pc) +static u32 tgl_get_rpe_freq(struct xe_guc_pc *pc) { struct xe_gt *gt = pc_to_gt(pc); - struct xe_device *xe = gt_to_xe(gt); - - if (GRAPHICS_VERx100(xe) >= 1270) { - mtl_update_rpa_value(pc); - mtl_update_rpe_value(pc); - } else { - tgl_update_rpa_value(pc); - tgl_update_rpe_value(pc); - } + u32 reg; /* - * RPe is decided at runtime by PCODE. In the rare case where that's - * smaller than the fused min, we will trust the PCODE and use that - * as our minimum one. + * For other platforms than PVC, we get the resolved RPe directly from + * PCODE at a different register */ - pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq); + reg = xe_mmio_read32(>->mmio, FREQ_INFO_REC); + return REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER; } /** @@ -565,9 +536,15 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc) */ u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc) { - pc_update_rp_values(pc); + struct xe_gt *gt = pc_to_gt(pc); + struct xe_device *xe = gt_to_xe(gt); - return pc->rpa_freq; + if (GRAPHICS_VERx100(xe) == 1260) + return pvc_get_rpa_freq(pc); + else if (GRAPHICS_VERx100(xe) >= 1270) + return mtl_get_rpa_freq(pc); + else + return tgl_get_rpa_freq(pc); } /** @@ -578,9 +555,17 @@ u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc) */ u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc) { - pc_update_rp_values(pc); + struct xe_device *xe = pc_to_xe(pc); + u32 freq; + + if (GRAPHICS_VERx100(xe) == 1260) + freq = pvc_get_rpe_freq(pc); + else if (GRAPHICS_VERx100(xe) >= 1270) + freq = mtl_get_rpe_freq(pc); + else + freq = tgl_get_rpe_freq(pc); - return pc->rpe_freq; + return freq; } /** @@ -904,7 +889,7 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc) if (pc_get_min_freq(pc) > pc->rp0_freq) ret = pc_set_min_freq(pc, pc->rp0_freq); - if (XE_GT_WA(tile->primary_gt, 14022085890)) + if (XE_DEVICE_WA(tile_to_xe(tile), 14022085890)) ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc))); out: @@ -983,7 +968,7 @@ void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc) * Wait for actual freq to go below the flush cap: even if the previous * max was below cap, the current one might still be above it */ - ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP); + ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP); if (ret) xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n", BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret)); @@ -1039,7 +1024,7 @@ static int pc_set_mert_freq_cap(struct xe_guc_pc *pc) /* * Ensure min and max are bound by MERT_FREQ_CAP until driver loads. 
*/ - ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc))); + ret = pc_set_min_freq(pc, min(xe_guc_pc_get_rpe_freq(pc), pc_max_freq_cap(pc))); if (!ret) ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc))); @@ -1150,8 +1135,6 @@ static int pc_init_freqs(struct xe_guc_pc *pc) if (ret) goto out; - pc_update_rp_values(pc); - pc_init_pcode_freq(pc); /* @@ -1357,7 +1340,7 @@ static void xe_guc_pc_fini_hw(void *arg) XE_WARN_ON(xe_guc_pc_stop(pc)); /* Bind requested freq to mert_freq_cap before unload */ - pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq)); + pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), xe_guc_pc_get_rpe_freq(pc))); xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h index 5e4ea53fbee6..711bbcdcb0d3 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h @@ -19,10 +19,6 @@ struct xe_guc_pc { atomic_t flush_freq_limit; /** @rp0_freq: HW RP0 frequency - The Maximum one */ u32 rp0_freq; - /** @rpa_freq: HW RPa frequency - The Achievable one */ - u32 rpa_freq; - /** @rpe_freq: HW RPe frequency - The Efficient one */ - u32 rpe_freq; /** @rpn_freq: HW RPN frequency - The Minimum one */ u32 rpn_freq; /** @user_requested_min: Stash the minimum requested freq by user */ diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c index e5dc94f3e618..0c0ff24ba62a 100644 --- a/drivers/gpu/drm/xe/xe_guc_relay.c +++ b/drivers/gpu/drm/xe/xe_guc_relay.c @@ -56,9 +56,19 @@ static struct xe_device *relay_to_xe(struct xe_guc_relay *relay) return gt_to_xe(relay_to_gt(relay)); } +#define XE_RELAY_DIAG_RATELIMIT_INTERVAL (10 * HZ) +#define XE_RELAY_DIAG_RATELIMIT_BURST 10 + +#define relay_ratelimit_printk(relay, _level, fmt...) ({ \ + typeof(relay) _r = (relay); \ + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) || \ + ___ratelimit(&_r->diag_ratelimit, "xe_guc_relay")) \ + xe_gt_sriov_##_level(relay_to_gt(_r), "relay: " fmt); \ +}) + #define relay_assert(relay, condition) xe_gt_assert(relay_to_gt(relay), condition) -#define relay_notice(relay, msg...) xe_gt_sriov_notice(relay_to_gt(relay), "relay: " msg) -#define relay_debug(relay, msg...) xe_gt_sriov_dbg_verbose(relay_to_gt(relay), "relay: " msg) +#define relay_notice(relay, msg...) relay_ratelimit_printk((relay), notice, msg) +#define relay_debug(relay, msg...) relay_ratelimit_printk((relay), dbg_verbose, msg) static int relay_get_totalvfs(struct xe_guc_relay *relay) { @@ -345,6 +355,9 @@ int xe_guc_relay_init(struct xe_guc_relay *relay) INIT_WORK(&relay->worker, relays_worker_fn); INIT_LIST_HEAD(&relay->pending_relays); INIT_LIST_HEAD(&relay->incoming_actions); + ratelimit_state_init(&relay->diag_ratelimit, + XE_RELAY_DIAG_RATELIMIT_INTERVAL, + XE_RELAY_DIAG_RATELIMIT_BURST); err = mempool_init_kmalloc_pool(&relay->pool, XE_RELAY_MEMPOOL_MIN_NUM + relay_get_totalvfs(relay), diff --git a/drivers/gpu/drm/xe/xe_guc_relay_types.h b/drivers/gpu/drm/xe/xe_guc_relay_types.h index 5999fcb77e96..20eee10856b2 100644 --- a/drivers/gpu/drm/xe/xe_guc_relay_types.h +++ b/drivers/gpu/drm/xe/xe_guc_relay_types.h @@ -7,6 +7,7 @@ #define _XE_GUC_RELAY_TYPES_H_ #include <linux/mempool.h> +#include <linux/ratelimit_types.h> #include <linux/spinlock.h> #include <linux/workqueue.h> @@ -31,6 +32,9 @@ struct xe_guc_relay { /** @last_rid: last Relay-ID used while sending a message. */ u32 last_rid; + + /** @diag_ratelimit: ratelimit state used to throttle diagnostics messages. 
*/ + struct ratelimit_state diag_ratelimit; }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 94ed8159496f..d4ffdb71ef3d 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -70,6 +70,8 @@ exec_queue_to_guc(struct xe_exec_queue *q) #define EXEC_QUEUE_STATE_BANNED (1 << 9) #define EXEC_QUEUE_STATE_CHECK_TIMEOUT (1 << 10) #define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11) +#define EXEC_QUEUE_STATE_PENDING_RESUME (1 << 12) +#define EXEC_QUEUE_STATE_PENDING_TDR_EXIT (1 << 13) static bool exec_queue_registered(struct xe_exec_queue *q) { @@ -141,6 +143,11 @@ static void set_exec_queue_destroyed(struct xe_exec_queue *q) atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); } +static void clear_exec_queue_destroyed(struct xe_exec_queue *q) +{ + atomic_and(~EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); +} + static bool exec_queue_banned(struct xe_exec_queue *q) { return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED; @@ -221,6 +228,41 @@ static void set_exec_queue_extra_ref(struct xe_exec_queue *q) atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state); } +static void clear_exec_queue_extra_ref(struct xe_exec_queue *q) +{ + atomic_and(~EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state); +} + +static bool exec_queue_pending_resume(struct xe_exec_queue *q) +{ + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME; +} + +static void set_exec_queue_pending_resume(struct xe_exec_queue *q) +{ + atomic_or(EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state); +} + +static void clear_exec_queue_pending_resume(struct xe_exec_queue *q) +{ + atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state); +} + +static bool exec_queue_pending_tdr_exit(struct xe_exec_queue *q) +{ + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_TDR_EXIT; +} + +static void set_exec_queue_pending_tdr_exit(struct xe_exec_queue *q) +{ + atomic_or(EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state); +} + +static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q) +{ + atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state); +} + static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) { return (atomic_read(&q->guc->state) & @@ -670,6 +712,11 @@ static u32 wq_space_until_wrap(struct xe_exec_queue *q) return (WQ_SIZE - q->guc->wqi_tail); } +static bool vf_recovery(struct xe_guc *guc) +{ + return xe_gt_recovery_pending(guc_to_gt(guc)); +} + static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size) { struct xe_guc *guc = exec_queue_to_guc(q); @@ -679,7 +726,7 @@ static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size) #define AVAILABLE_SPACE \ CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE) - if (wqi_size > AVAILABLE_SPACE) { + if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) { try_again: q->guc->wqi_head = parallel_read(xe, map, wq_desc.head); if (wqi_size > AVAILABLE_SPACE) { @@ -736,18 +783,12 @@ static void wq_item_append(struct xe_exec_queue *q) if (wq_wait_for_space(q, wqi_size)) return; - xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN); wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) | FIELD_PREP(WQ_LEN_MASK, len_dw); - xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW); wqi[i++] = xe_lrc_descriptor(q->lrc[0]); - xe_gt_assert(guc_to_gt(guc), i == - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS); wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | FIELD_PREP(WQ_RING_TAIL_MASK, 
q->lrc[0]->ring.tail / sizeof(u64)); - xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID); wqi[i++] = 0; - xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL); for (j = 1; j < q->width; ++j) { struct xe_lrc *lrc = q->lrc[j]; @@ -768,52 +809,8 @@ static void wq_item_append(struct xe_exec_queue *q) parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail); } -static int wq_items_rebase(struct xe_exec_queue *q) -{ - struct xe_guc *guc = exec_queue_to_guc(q); - struct xe_device *xe = guc_to_xe(guc); - struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); - int i = q->guc->wqi_head; - - /* the ring starts after a header struct */ - iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch, wq[0])); - - while ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) { - u32 len_dw, type, val; - - if (drm_WARN_ON_ONCE(&xe->drm, i < 0 || i > 2 * WQ_SIZE)) - break; - - val = xe_map_rd_ring_u32(xe, &map, i / sizeof(u32) + - XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN, - WQ_SIZE / sizeof(u32)); - len_dw = FIELD_GET(WQ_LEN_MASK, val); - type = FIELD_GET(WQ_TYPE_MASK, val); - - if (drm_WARN_ON_ONCE(&xe->drm, len_dw >= WQ_SIZE / sizeof(u32))) - break; - - if (type == WQ_TYPE_MULTI_LRC) { - val = xe_lrc_descriptor(q->lrc[0]); - xe_map_wr_ring_u32(xe, &map, i / sizeof(u32) + - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW, - WQ_SIZE / sizeof(u32), val); - } else if (drm_WARN_ON_ONCE(&xe->drm, type != WQ_TYPE_NOOP)) { - break; - } - - i += (len_dw + 1) * sizeof(u32); - } - - if ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) { - xe_gt_err(q->gt, "Exec queue fixups incomplete - wqi parse failed\n"); - return -EBADMSG; - } - return 0; -} - #define RESUME_PENDING ~0x0ull -static void submit_exec_queue(struct xe_exec_queue *q) +static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job) { struct xe_guc *guc = exec_queue_to_guc(q); struct xe_lrc *lrc = q->lrc[0]; @@ -825,10 +822,13 @@ static void submit_exec_queue(struct xe_exec_queue *q) xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); - if (xe_exec_queue_is_parallel(q)) - wq_item_append(q); - else - xe_lrc_set_ring_tail(lrc, lrc->ring.tail); + if (!job->skip_emit || job->last_replay) { + if (xe_exec_queue_is_parallel(q)) + wq_item_append(q); + else + xe_lrc_set_ring_tail(lrc, lrc->ring.tail); + job->last_replay = false; + } if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q)) return; @@ -870,54 +870,33 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job) struct xe_sched_job *job = to_xe_sched_job(drm_job); struct xe_exec_queue *q = job->q; struct xe_guc *guc = exec_queue_to_guc(q); - struct dma_fence *fence = NULL; - bool lr = xe_exec_queue_is_lr(q); + bool lr = xe_exec_queue_is_lr(q), killed_or_banned_or_wedged = + exec_queue_killed_or_banned_or_wedged(q); xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || exec_queue_banned(q) || exec_queue_suspended(q)); trace_xe_sched_job_run(job); - if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) { + if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) { if (!exec_queue_registered(q)) register_exec_queue(q, GUC_CONTEXT_NORMAL); - if (!lr) /* LR jobs are emitted in the exec IOCTL */ + if (!job->skip_emit) q->ring_ops->emit_job(job); - submit_exec_queue(q); + submit_exec_queue(q, job); + job->skip_emit = false; } - if (lr) { - xe_sched_job_set_error(job, -EOPNOTSUPP); - dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */ - } else { - fence 
= job->fence; - } - - return fence; -} - -/** - * xe_guc_jobs_ring_rebase - Re-emit ring commands of requests pending - * on all queues under a guc. - * @guc: the &xe_guc struct instance - */ -void xe_guc_jobs_ring_rebase(struct xe_guc *guc) -{ - struct xe_exec_queue *q; - unsigned long index; - /* - * This routine is used within VF migration recovery. This means - * using the lock here introduces a restriction: we cannot wait - * for any GFX HW response while the lock is taken. + * We don't care about job-fence ordering in LR VMs because these fences + * are never exported; they are used solely to keep jobs on the pending + * list. Once a queue enters an error state, there's no need to track + * them. */ - mutex_lock(&guc->submission_state.lock); - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { - if (exec_queue_killed_or_banned_or_wedged(q)) - continue; - xe_exec_queue_jobs_ring_restore(q); - } - mutex_unlock(&guc->submission_state.lock); + if (killed_or_banned_or_wedged && lr) + xe_sched_job_set_error(job, -ECANCELED); + + return job->fence; } static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) @@ -951,15 +930,17 @@ static void disable_scheduling_deregister(struct xe_guc *guc, ret = wait_event_timeout(guc->ct.wq, (!exec_queue_pending_enable(q) && !exec_queue_pending_disable(q)) || - xe_guc_read_stopped(guc), + xe_guc_read_stopped(guc) || + vf_recovery(guc), HZ * 5); - if (!ret) { + if (!ret && !vf_recovery(guc)) { struct xe_gpu_scheduler *sched = &q->guc->sched; xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n"); xe_sched_submission_start(sched); xe_gt_reset_async(q->gt); - xe_sched_tdr_queue_imm(sched); + if (!xe_exec_queue_is_lr(q)) + xe_sched_tdr_queue_imm(sched); return; } @@ -1051,9 +1032,14 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) struct xe_exec_queue *q = ge->q; struct xe_guc *guc = exec_queue_to_guc(q); struct xe_gpu_scheduler *sched = &ge->sched; + struct xe_sched_job *job; bool wedged = false; xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q)); + + if (vf_recovery(guc)) + return; + trace_xe_exec_queue_lr_cleanup(q); if (!exec_queue_killed(q)) @@ -1086,7 +1072,11 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) */ ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_disable(q) || - xe_guc_read_stopped(guc), HZ * 5); + xe_guc_read_stopped(guc) || + vf_recovery(guc), HZ * 5); + if (vf_recovery(guc)) + return; + if (!ret) { xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n", q->guc->id); @@ -1101,7 +1091,16 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0])) xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id); + xe_hw_fence_irq_stop(q->fence_irq); + xe_sched_submission_start(sched); + + spin_lock(&sched->base.job_list_lock); + list_for_each_entry(job, &sched->base.pending_list, drm.list) + xe_sched_job_set_error(job, -ECANCELED); + spin_unlock(&sched->base.job_list_lock); + + xe_hw_fence_irq_start(q->fence_irq); } #define ADJUST_FIVE_PERCENT(__t) mul_u64_u32_div(__t, 105, 100) @@ -1167,12 +1166,14 @@ static void enable_scheduling(struct xe_exec_queue *q) ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || - xe_guc_read_stopped(guc), HZ * 5); - if (!ret || xe_guc_read_stopped(guc)) { + xe_guc_read_stopped(guc) || + vf_recovery(guc), HZ * 5); + if ((!ret && !vf_recovery(guc)) || xe_guc_read_stopped(guc)) { xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to 
respond"); set_exec_queue_banned(q); xe_gt_reset_async(q->gt); - xe_sched_tdr_queue_imm(&q->guc->sched); + if (!xe_exec_queue_is_lr(q)) + xe_sched_tdr_queue_imm(&q->guc->sched); } } @@ -1230,13 +1231,16 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) int i = 0; bool wedged = false, skip_timeout_check; + xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_lr(q)); + /* * TDR has fired before free job worker. Common if exec queue * immediately closed after last fence signaled. Add back to pending * list so job can be freed and kick scheduler ensuring free job is not * lost. */ - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags) || + vf_recovery(guc)) return DRM_GPU_SCHED_STAT_NO_HANG; /* Kill the run_job entry point */ @@ -1288,7 +1292,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) ret = wait_event_timeout(guc->ct.wq, (!exec_queue_pending_enable(q) && !exec_queue_pending_disable(q)) || - xe_guc_read_stopped(guc), HZ * 5); + xe_guc_read_stopped(guc) || + vf_recovery(guc), HZ * 5); + if (vf_recovery(guc)) + goto handle_vf_resume; if (!ret || xe_guc_read_stopped(guc)) goto trigger_reset; @@ -1313,7 +1320,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) smp_rmb(); ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_disable(q) || - xe_guc_read_stopped(guc), HZ * 5); + xe_guc_read_stopped(guc) || + vf_recovery(guc), HZ * 5); + if (vf_recovery(guc)) + goto handle_vf_resume; if (!ret || xe_guc_read_stopped(guc)) { trigger_reset: if (!ret) @@ -1409,6 +1419,7 @@ trigger_reset: return DRM_GPU_SCHED_STAT_RESET; sched_enable: + set_exec_queue_pending_tdr_exit(q); enable_scheduling(q); rearm: /* @@ -1417,6 +1428,7 @@ rearm: * some thought, do this in a follow up. */ xe_sched_submission_start(sched); +handle_vf_resume: return DRM_GPU_SCHED_STAT_NO_HANG; } @@ -1523,11 +1535,24 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms static void __suspend_fence_signal(struct xe_exec_queue *q) { + struct xe_guc *guc = exec_queue_to_guc(q); + struct xe_device *xe = guc_to_xe(guc); + if (!q->guc->suspend_pending) return; WRITE_ONCE(q->guc->suspend_pending, false); - wake_up(&q->guc->suspend_wait); + + /* + * We use a GuC shared wait queue for VFs because the VF resfix start + * interrupt must be able to wake all instances of suspend_wait. This + * prevents the VF migration worker from being starved during + * scheduling. 
+ */ + if (IS_SRIOV_VF(xe)) + wake_up_all(&guc->ct.wq); + else + wake_up(&q->guc->suspend_wait); } static void suspend_fence_signal(struct xe_exec_queue *q) @@ -1548,8 +1573,9 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg) if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && exec_queue_enabled(q)) { - wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING || - xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)); + wait_event(guc->ct.wq, vf_recovery(guc) || + ((q->guc->resume_time != RESUME_PENDING || + xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q))); if (!xe_guc_read_stopped(guc)) { s64 since_resume_ms = @@ -1578,6 +1604,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg) clear_exec_queue_suspended(q); if (!exec_queue_enabled(q)) { q->guc->resume_time = RESUME_PENDING; + set_exec_queue_pending_resume(q); enable_scheduling(q); } } else { @@ -1591,6 +1618,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg) #define RESUME 4 #define OPCODE_MASK 0xf #define MSG_LOCKED BIT(8) +#define MSG_HEAD BIT(9) static void guc_exec_queue_process_msg(struct xe_sched_msg *msg) { @@ -1653,7 +1681,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(q->sched_props.job_timeout_ms); err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, - NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, + NULL, xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES, 64, timeout, guc_to_gt(guc)->ordered_wq, NULL, q->name, gt_to_xe(q->gt)->drm.dev); if (err) @@ -1675,7 +1703,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) q->entity = &ge->entity; - if (xe_guc_read_stopped(guc)) + if (xe_guc_read_stopped(guc) || vf_recovery(guc)) xe_sched_stop(sched); mutex_unlock(&guc->submission_state.lock); @@ -1715,12 +1743,24 @@ static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg msg->private_data = q; trace_xe_sched_msg_add(msg); - if (opcode & MSG_LOCKED) + if (opcode & MSG_HEAD) + xe_sched_add_msg_head(&q->guc->sched, msg); + else if (opcode & MSG_LOCKED) xe_sched_add_msg_locked(&q->guc->sched, msg); else xe_sched_add_msg(&q->guc->sched, msg); } +static void guc_exec_queue_try_add_msg_head(struct xe_exec_queue *q, + struct xe_sched_msg *msg, + u32 opcode) +{ + if (!list_empty(&msg->link)) + return; + + guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED | MSG_HEAD); +} + static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg, u32 opcode) @@ -1821,6 +1861,7 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q) static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) { struct xe_guc *guc = exec_queue_to_guc(q); + struct xe_device *xe = guc_to_xe(guc); int ret; /* @@ -1828,11 +1869,21 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) * suspend_pending upon kill but to be paranoid but races in which * suspend_pending is set after kill also check kill here. 
*/ - ret = wait_event_interruptible_timeout(q->guc->suspend_wait, - !READ_ONCE(q->guc->suspend_pending) || - exec_queue_killed(q) || - xe_guc_read_stopped(guc), - HZ * 5); +#define WAIT_COND \ + (!READ_ONCE(q->guc->suspend_pending) || exec_queue_killed(q) || \ + xe_guc_read_stopped(guc)) + +retry: + if (IS_SRIOV_VF(xe)) + ret = wait_event_interruptible_timeout(guc->ct.wq, WAIT_COND || + vf_recovery(guc), + HZ * 5); + else + ret = wait_event_interruptible_timeout(q->guc->suspend_wait, + WAIT_COND, HZ * 5); + + if (vf_recovery(guc) && !xe_device_wedged((guc_to_xe(guc)))) + return -EAGAIN; if (!ret) { xe_gt_warn(guc_to_gt(guc), @@ -1840,8 +1891,13 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) q->guc->id); /* XXX: Trigger GT reset? */ return -ETIME; + } else if (IS_SRIOV_VF(xe) && !WAIT_COND) { + /* Corner case on RESFIX DONE where vf_recovery() changes */ + goto retry; } +#undef WAIT_COND + return ret < 0 ? ret : 0; } @@ -1864,7 +1920,7 @@ static bool guc_exec_queue_reset_status(struct xe_exec_queue *q) } /* - * All of these functions are an abstraction layer which other parts of XE can + * All of these functions are an abstraction layer which other parts of Xe can * use to trap into the GuC backend. All of these functions, aside from init, * really shouldn't do much other than trap into the DRM scheduler which * synchronizes these operations. @@ -1936,47 +1992,13 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) } } -/** - * xe_guc_submit_reset_block - Disallow reset calls on given GuC. - * @guc: the &xe_guc struct instance - */ -int xe_guc_submit_reset_block(struct xe_guc *guc) -{ - return atomic_fetch_or(1, &guc->submission_state.reset_blocked); -} - -/** - * xe_guc_submit_reset_unblock - Allow back reset calls on given GuC. - * @guc: the &xe_guc struct instance - */ -void xe_guc_submit_reset_unblock(struct xe_guc *guc) -{ - atomic_set_release(&guc->submission_state.reset_blocked, 0); - wake_up_all(&guc->ct.wq); -} - -static int guc_submit_reset_is_blocked(struct xe_guc *guc) -{ - return atomic_read_acquire(&guc->submission_state.reset_blocked); -} - -/* Maximum time of blocking reset */ -#define RESET_BLOCK_PERIOD_MAX (HZ * 5) - -/** - * xe_guc_wait_reset_unblock - Wait until reset blocking flag is lifted, or timeout. 
- * @guc: the &xe_guc struct instance - */ -int xe_guc_wait_reset_unblock(struct xe_guc *guc) -{ - return wait_event_timeout(guc->ct.wq, - !guc_submit_reset_is_blocked(guc), RESET_BLOCK_PERIOD_MAX); -} - int xe_guc_submit_reset_prepare(struct xe_guc *guc) { int ret; + if (xe_gt_WARN_ON(guc_to_gt(guc), vf_recovery(guc))) + return 0; + if (!guc->submission_state.initialized) return 0; @@ -2026,6 +2048,119 @@ void xe_guc_submit_stop(struct xe_guc *guc) } +static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc, + struct xe_exec_queue *q) +{ + bool pending_enable, pending_disable, pending_resume; + + pending_enable = exec_queue_pending_enable(q); + pending_resume = exec_queue_pending_resume(q); + + if (pending_enable && pending_resume) { + q->guc->needs_resume = true; + xe_gt_dbg(guc_to_gt(guc), "Replay RESUME - guc_id=%d", + q->guc->id); + } + + if (pending_enable && !pending_resume && + !exec_queue_pending_tdr_exit(q)) { + clear_exec_queue_registered(q); + if (xe_exec_queue_is_lr(q)) + xe_exec_queue_put(q); + xe_gt_dbg(guc_to_gt(guc), "Replay REGISTER - guc_id=%d", + q->guc->id); + } + + if (pending_enable) { + clear_exec_queue_enabled(q); + clear_exec_queue_pending_resume(q); + clear_exec_queue_pending_tdr_exit(q); + clear_exec_queue_pending_enable(q); + xe_gt_dbg(guc_to_gt(guc), "Replay ENABLE - guc_id=%d", + q->guc->id); + } + + if (exec_queue_destroyed(q) && exec_queue_registered(q)) { + clear_exec_queue_destroyed(q); + if (exec_queue_extra_ref(q)) + xe_exec_queue_put(q); + else + q->guc->needs_cleanup = true; + clear_exec_queue_extra_ref(q); + xe_gt_dbg(guc_to_gt(guc), "Replay CLEANUP - guc_id=%d", + q->guc->id); + } + + pending_disable = exec_queue_pending_disable(q); + + if (pending_disable && exec_queue_suspended(q)) { + clear_exec_queue_suspended(q); + q->guc->needs_suspend = true; + xe_gt_dbg(guc_to_gt(guc), "Replay SUSPEND - guc_id=%d", + q->guc->id); + } + + if (pending_disable) { + if (!pending_enable) + set_exec_queue_enabled(q); + clear_exec_queue_pending_disable(q); + clear_exec_queue_check_timeout(q); + xe_gt_dbg(guc_to_gt(guc), "Replay DISABLE - guc_id=%d", + q->guc->id); + } + + q->guc->resume_time = 0; +} + +/* + * This function is quite complex but only real way to ensure no state is lost + * during VF resume flows. The function scans the queue state, make adjustments + * as needed, and queues jobs / messages which replayed upon unpause. + */ +static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q) +{ + struct xe_gpu_scheduler *sched = &q->guc->sched; + struct xe_sched_job *job; + int i; + + lockdep_assert_held(&guc->submission_state.lock); + + /* Stop scheduling + flush any DRM scheduler operations */ + xe_sched_submission_stop(sched); + if (xe_exec_queue_is_lr(q)) + cancel_work_sync(&q->guc->lr_tdr); + else + cancel_delayed_work_sync(&sched->base.work_tdr); + + guc_exec_queue_revert_pending_state_change(guc, q); + + if (xe_exec_queue_is_parallel(q)) { + struct xe_device *xe = guc_to_xe(guc); + struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); + + /* + * NOP existing WQ commands that may contain stale GGTT + * addresses. These will be replayed upon unpause. The hardware + * seems to get confused if the WQ head/tail pointers are + * adjusted. 
+ */ + for (i = 0; i < WQ_SIZE / sizeof(u32); ++i) + parallel_write(xe, map, wq[i], + FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) | + FIELD_PREP(WQ_LEN_MASK, 0)); + } + + job = xe_sched_first_pending_job(sched); + if (job) { + /* + * Adjust software tail so jobs submitted overwrite previous + * position in ring buffer with new GGTT addresses. + */ + for (i = 0; i < q->width; ++i) + q->lrc[i]->ring.tail = job->ptrs[i].head; + } +} + /** * xe_guc_submit_pause - Stop further runs of submission tasks on given GuC. * @guc: the &xe_guc struct instance whose scheduler is to be disabled @@ -2035,8 +2170,17 @@ void xe_guc_submit_pause(struct xe_guc *guc) struct xe_exec_queue *q; unsigned long index; - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) - xe_sched_submission_stop_async(&q->guc->sched); + xe_gt_assert(guc_to_gt(guc), vf_recovery(guc)); + + mutex_lock(&guc->submission_state.lock); + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to stop parallel queues */ + if (q->guc->id != index) + continue; + + guc_exec_queue_pause(guc, q); + } + mutex_unlock(&guc->submission_state.lock); } static void guc_exec_queue_start(struct xe_exec_queue *q) @@ -2044,11 +2188,25 @@ static void guc_exec_queue_start(struct xe_exec_queue *q) struct xe_gpu_scheduler *sched = &q->guc->sched; if (!exec_queue_killed_or_banned_or_wedged(q)) { + struct xe_sched_job *job = xe_sched_first_pending_job(sched); int i; trace_xe_exec_queue_resubmit(q); - for (i = 0; i < q->width; ++i) - xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail); + if (job) { + for (i = 0; i < q->width; ++i) { + /* + * The GuC context is unregistered at this point + * time, adjusting software ring tail ensures + * jobs are rewritten in original placement, + * adjusting LRC tail ensures the newly loaded + * GuC / contexts only view the LRC tail + * increasing as jobs are written out. + */ + q->lrc[i]->ring.tail = job->ptrs[i].head; + xe_lrc_set_ring_tail(q->lrc[i], + xe_lrc_ring_head(q->lrc[i])); + } + } xe_sched_resubmit_jobs(sched); } @@ -2079,11 +2237,100 @@ int xe_guc_submit_start(struct xe_guc *guc) return 0; } -static void guc_exec_queue_unpause(struct xe_exec_queue *q) +static void guc_exec_queue_unpause_prepare(struct xe_guc *guc, + struct xe_exec_queue *q) { struct xe_gpu_scheduler *sched = &q->guc->sched; + struct drm_sched_job *s_job; + struct xe_sched_job *job = NULL; + + list_for_each_entry(s_job, &sched->base.pending_list, list) { + job = to_xe_sched_job(s_job); + + xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d", + q->guc->id, xe_sched_job_seqno(job)); + + q->ring_ops->emit_job(job); + job->skip_emit = true; + } + if (job) + job->last_replay = true; +} + +/** + * xe_guc_submit_unpause_prepare - Prepare unpause submission tasks on given GuC. 
+ * @guc: the &xe_guc struct instance whose scheduler is to be prepared for unpause + */ +void xe_guc_submit_unpause_prepare(struct xe_guc *guc) +{ + struct xe_exec_queue *q; + unsigned long index; + + xe_gt_assert(guc_to_gt(guc), vf_recovery(guc)); + + mutex_lock(&guc->submission_state.lock); + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to stop parallel queues */ + if (q->guc->id != index) + continue; + + guc_exec_queue_unpause_prepare(guc, q); + } + mutex_unlock(&guc->submission_state.lock); +} + +static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q) +{ + struct xe_gpu_scheduler *sched = &q->guc->sched; + struct xe_sched_msg *msg; + + if (q->guc->needs_cleanup) { + msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; + + guc_exec_queue_add_msg(q, msg, CLEANUP); + q->guc->needs_cleanup = false; + } + + if (q->guc->needs_suspend) { + msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; + + xe_sched_msg_lock(sched); + guc_exec_queue_try_add_msg_head(q, msg, SUSPEND); + xe_sched_msg_unlock(sched); + + q->guc->needs_suspend = false; + } + + /* + * The resume must be in the message queue before the suspend as it is + * not possible for a resume to be issued if a suspend pending is, but + * the inverse is possible. + */ + if (q->guc->needs_resume) { + msg = q->guc->static_msgs + STATIC_MSG_RESUME; + + xe_sched_msg_lock(sched); + guc_exec_queue_try_add_msg_head(q, msg, RESUME); + xe_sched_msg_unlock(sched); + + q->guc->needs_resume = false; + } +} + +static void guc_exec_queue_unpause(struct xe_guc *guc, struct xe_exec_queue *q) +{ + struct xe_gpu_scheduler *sched = &q->guc->sched; + bool needs_tdr = exec_queue_killed_or_banned_or_wedged(q); + + lockdep_assert_held(&guc->submission_state.lock); + + xe_sched_resubmit_jobs(sched); + guc_exec_queue_replay_pending_state_change(q); xe_sched_submission_start(sched); + if (needs_tdr) + xe_guc_exec_queue_trigger_cleanup(q); + xe_sched_submission_resume_tdr(sched); } /** @@ -2095,10 +2342,43 @@ void xe_guc_submit_unpause(struct xe_guc *guc) struct xe_exec_queue *q; unsigned long index; - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) - guc_exec_queue_unpause(q); + mutex_lock(&guc->submission_state.lock); + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* + * Prevent redundant attempts to stop parallel queues, or queues + * created after resfix done. + */ + if (q->guc->id != index || + !READ_ONCE(q->guc->sched.base.pause_submit)) + continue; - wake_up_all(&guc->ct.wq); + guc_exec_queue_unpause(guc, q); + } + mutex_unlock(&guc->submission_state.lock); +} + +/** + * xe_guc_submit_pause_abort - Abort all paused submission task on given GuC. 
+ * @guc: the &xe_guc struct instance whose scheduler is to be aborted + */ +void xe_guc_submit_pause_abort(struct xe_guc *guc) +{ + struct xe_exec_queue *q; + unsigned long index; + + mutex_lock(&guc->submission_state.lock); + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + struct xe_gpu_scheduler *sched = &q->guc->sched; + + /* Prevent redundant attempts to stop parallel queues */ + if (q->guc->id != index) + continue; + + xe_sched_submission_start(sched); + if (exec_queue_killed_or_banned_or_wedged(q)) + xe_guc_exec_queue_trigger_cleanup(q); + } + mutex_unlock(&guc->submission_state.lock); } static struct xe_exec_queue * @@ -2150,6 +2430,8 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q, xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q)); q->guc->resume_time = ktime_get(); + clear_exec_queue_pending_resume(q); + clear_exec_queue_pending_tdr_exit(q); clear_exec_queue_pending_enable(q); smp_wmb(); wake_up_all(&guc->ct.wq); @@ -2677,13 +2959,13 @@ int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch) mutex_lock(&guc->submission_state.lock); xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to stop parallel queues */ + if (q->guc->id != index) + continue; + err = xe_exec_queue_contexts_hwsp_rebase(q, scratch); if (err) break; - if (xe_exec_queue_is_parallel(q)) - err = wq_items_rebase(q); - if (err) - break; } mutex_unlock(&guc->submission_state.lock); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h index 78c3f07e31a0..b49a2748ec46 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.h +++ b/drivers/gpu/drm/xe/xe_guc_submit.h @@ -22,9 +22,8 @@ void xe_guc_submit_stop(struct xe_guc *guc); int xe_guc_submit_start(struct xe_guc *guc); void xe_guc_submit_pause(struct xe_guc *guc); void xe_guc_submit_unpause(struct xe_guc *guc); -int xe_guc_submit_reset_block(struct xe_guc *guc); -void xe_guc_submit_reset_unblock(struct xe_guc *guc); -int xe_guc_wait_reset_unblock(struct xe_guc *guc); +void xe_guc_submit_unpause_prepare(struct xe_guc *guc); +void xe_guc_submit_pause_abort(struct xe_guc *guc); void xe_guc_submit_wedge(struct xe_guc *guc); int xe_guc_read_stopped(struct xe_guc *guc); @@ -36,8 +35,6 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len); -void xe_guc_jobs_ring_rebase(struct xe_guc *guc); - struct xe_guc_submit_exec_queue_snapshot * xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q); void diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c index 6bf2103602f8..a80175c7c478 100644 --- a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c +++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c @@ -207,7 +207,7 @@ static const struct xe_tlb_inval_ops guc_tlb_inval_ops = { * @guc: GuC object * @tlb_inval: TLB invalidation client * - * Inititialize GuC TLB invalidation by setting back pointer in TLB invalidation + * Initialize GuC TLB invalidation by setting back pointer in TLB invalidation * client to the GuC and setting GuC backend ops. 
*/ void xe_guc_tlb_inval_init_early(struct xe_guc *guc, diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c index a415ca488791..2b3d49dd394c 100644 --- a/drivers/gpu/drm/xe/xe_heci_gsc.c +++ b/drivers/gpu/drm/xe/xe_heci_gsc.c @@ -8,6 +8,8 @@ #include <linux/pci.h> #include <linux/sizes.h> +#include <drm/drm_print.h> + #include "xe_device_types.h" #include "xe_drv.h" #include "xe_heci_gsc.h" diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c index 7e43b2dd6a32..0a70c8924582 100644 --- a/drivers/gpu/drm/xe/xe_huc.c +++ b/drivers/gpu/drm/xe/xe_huc.c @@ -66,14 +66,18 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc) int xe_huc_init(struct xe_huc *huc) { struct xe_gt *gt = huc_to_gt(huc); - struct xe_tile *tile = gt_to_tile(gt); struct xe_device *xe = gt_to_xe(gt); int ret; huc->fw.type = XE_UC_FW_TYPE_HUC; - /* On platforms with a media GT the HuC is only available there */ - if (tile->media_gt && (gt != tile->media_gt)) { + /* + * The HuC is only available on the media GT on most platforms. The + * exception to that rule are the old Xe1 platforms where there was + * no separate GT for media IP, so the HuC was part of the primary + * GT. Such platforms have graphics versions 12.55 and earlier. + */ + if (!xe_gt_is_media_type(gt) && GRAPHICS_VERx100(xe) > 1255) { xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED); return 0; } diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 1cf623b4a5bc..6a9e2a4272dd 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -346,17 +346,26 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe) xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0)); } -static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt, +static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe) { + /* + * Xe3p no longer supports load balance mode, so "fixed cslice" mode + * is automatic and no RCU_MODE programming is required. + */ + if (GRAPHICS_VER(gt_to_xe(gt)) >= 35) + return false; + return xe_gt_ccs_mode_enabled(gt) && - xe_rtp_match_first_render_or_compute(gt, hwe); + xe_rtp_match_first_render_or_compute(xe, gt, hwe); } -static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt, +static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe) { - if (GRAPHICS_VER(gt_to_xe(gt)) < 20) + if (GRAPHICS_VER(xe) < 20) return false; if (hwe->class != XE_ENGINE_CLASS_COMPUTE && @@ -709,27 +718,52 @@ static void read_media_fuses(struct xe_gt *gt) } } +static u32 infer_svccopy_from_meml3(struct xe_gt *gt) +{ + u32 meml3 = REG_FIELD_GET(MEML3_EN_MASK, + xe_mmio_read32(>->mmio, MIRROR_FUSE3)); + u32 svccopy_mask = 0; + + /* + * Each of the four meml3 bits determines the fusing of two service + * copy engines. + */ + for (int i = 0; i < 4; i++) + svccopy_mask |= (meml3 & BIT(i)) ? 
0b11 << 2 * i : 0; + + return svccopy_mask; +} + +static u32 read_svccopy_fuses(struct xe_gt *gt) +{ + return REG_FIELD_GET(FUSE_SERVICE_COPY_ENABLE_MASK, + xe_mmio_read32(>->mmio, SERVICE_COPY_ENABLE)); +} + static void read_copy_fuses(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); u32 bcs_mask; - if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270) - return; - xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); - bcs_mask = xe_mmio_read32(>->mmio, MIRROR_FUSE3); - bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask); + if (GRAPHICS_VER(xe) >= 35) + bcs_mask = read_svccopy_fuses(gt); + else if (GRAPHICS_VERx100(xe) == 1260) + bcs_mask = infer_svccopy_from_meml3(gt); + else + return; - /* BCS0 is always present; only BCS1-BCS8 may be fused off */ - for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) { + /* Only BCS1-BCS8 may be fused off */ + bcs_mask <<= XE_HW_ENGINE_BCS1; + for (int i = XE_HW_ENGINE_BCS1; i <= XE_HW_ENGINE_BCS8; ++i) { if (!(gt->info.engine_mask & BIT(i))) continue; - if (!(BIT(j / 2) & bcs_mask)) { + if (!(bcs_mask & BIT(i))) { gt->info.engine_mask &= ~BIT(i); - xe_gt_info(gt, "bcs%u fused off\n", j); + xe_gt_info(gt, "bcs%u fused off\n", + i - XE_HW_ENGINE_BCS0); } } } @@ -870,7 +904,7 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec) if (hwe->irq_handler) hwe->irq_handler(hwe, intr_vec); - if (intr_vec & GT_RENDER_USER_INTERRUPT) + if (intr_vec & GT_MI_USER_INTERRUPT) xe_hw_fence_irq_run(hwe->fence_irq); } diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index b6790589e623..97879daeefc1 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -658,8 +658,6 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj, struct xe_reg rapl_limit; struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); - xe_pm_runtime_get(hwmon->xe); - if (hwmon->xe->info.has_mbx_power_limits) { xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval); } else if (power_attr != PL2_HWMON_ATTR) { @@ -669,8 +667,6 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj, } ret = (uval & PWR_LIM_EN) ? 
attr->mode : 0; - xe_pm_runtime_put(hwmon->xe); - return ret; } @@ -1096,8 +1092,6 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata; int ret; - xe_pm_runtime_get(hwmon->xe); - switch (type) { case hwmon_temp: ret = xe_hwmon_temp_is_visible(hwmon, attr, channel); @@ -1122,8 +1116,6 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, break; } - xe_pm_runtime_put(hwmon->xe); - return ret; } diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c index 48dfcb41fa08..0b5452be0c87 100644 --- a/drivers/gpu/drm/xe/xe_i2c.c +++ b/drivers/gpu/drm/xe/xe_i2c.c @@ -160,6 +160,11 @@ bool xe_i2c_present(struct xe_device *xe) return xe->i2c && xe->i2c->ep.cookie == XE_I2C_EP_COOKIE_DEVICE; } +static bool xe_i2c_irq_present(struct xe_device *xe) +{ + return xe->i2c && xe->i2c->adapter_irq; +} + /** * xe_i2c_irq_handler: Handler for I2C interrupts * @xe: xe device instance @@ -170,13 +175,33 @@ bool xe_i2c_present(struct xe_device *xe) */ void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { - if (!xe->i2c || !xe->i2c->adapter_irq) + if (!xe_i2c_irq_present(xe)) return; if (master_ctl & I2C_IRQ) generic_handle_irq_safe(xe->i2c->adapter_irq); } +void xe_i2c_irq_reset(struct xe_device *xe) +{ + struct xe_mmio *mmio = xe_root_tile_mmio(xe); + + if (!xe_i2c_irq_present(xe)) + return; + + xe_mmio_rmw32(mmio, I2C_BRIDGE_PCICFGCTL, ACPI_INTR_EN, 0); +} + +void xe_i2c_irq_postinstall(struct xe_device *xe) +{ + struct xe_mmio *mmio = xe_root_tile_mmio(xe); + + if (!xe_i2c_irq_present(xe)) + return; + + xe_mmio_rmw32(mmio, I2C_BRIDGE_PCICFGCTL, 0, ACPI_INTR_EN); +} + static int xe_i2c_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw_irq_num) { @@ -334,6 +359,7 @@ int xe_i2c_probe(struct xe_device *xe) if (ret) goto err_remove_irq; + xe_i2c_irq_postinstall(xe); return devm_add_action_or_reset(drm_dev, xe_i2c_remove, i2c); err_remove_irq: diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h index ecd5f10358e2..425d8160835f 100644 --- a/drivers/gpu/drm/xe/xe_i2c.h +++ b/drivers/gpu/drm/xe/xe_i2c.h @@ -51,12 +51,16 @@ struct xe_i2c { int xe_i2c_probe(struct xe_device *xe); bool xe_i2c_present(struct xe_device *xe); void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl); +void xe_i2c_irq_postinstall(struct xe_device *xe); +void xe_i2c_irq_reset(struct xe_device *xe); void xe_i2c_pm_suspend(struct xe_device *xe); void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold); #else static inline int xe_i2c_probe(struct xe_device *xe) { return 0; } static inline bool xe_i2c_present(struct xe_device *xe) { return false; } static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { } +static inline void xe_i2c_irq_postinstall(struct xe_device *xe) { } +static inline void xe_i2c_irq_reset(struct xe_device *xe) { } static inline void xe_i2c_pm_suspend(struct xe_device *xe) { } static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { } #endif diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 870edaf69388..024e13e606ec 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -139,68 +139,112 @@ void xe_irq_enable_hwe(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); struct xe_mmio *mmio = >->mmio; - u32 ccs_mask, bcs_mask; - u32 irqs, dmask, smask; - u32 gsc_mask = 0; - u32 heci_mask = 0; + u32 common_mask, val, gsc_mask = 0, heci_mask = 0, + rcs_mask = 0, bcs_mask = 0, vcs_mask = 0, 
vecs_mask = 0, + ccs_mask = 0; if (xe_device_uses_memirq(xe)) return; if (xe_device_uc_enabled(xe)) { - irqs = GT_RENDER_USER_INTERRUPT | - GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; + common_mask = GT_MI_USER_INTERRUPT | + GT_FLUSH_COMPLETE_INTERRUPT; + + /* Enable Compute Walker Interrupt for non-MSIX platforms */ + if (GRAPHICS_VERx100(xe) >= 3511 && !xe_device_has_msix(xe)) { + rcs_mask |= GT_COMPUTE_WALKER_INTERRUPT; + ccs_mask |= GT_COMPUTE_WALKER_INTERRUPT; + } } else { - irqs = GT_RENDER_USER_INTERRUPT | - GT_CS_MASTER_ERROR_INTERRUPT | - GT_CONTEXT_SWITCH_INTERRUPT | - GT_WAIT_SEMAPHORE_INTERRUPT; + common_mask = GT_MI_USER_INTERRUPT | + GT_CS_MASTER_ERROR_INTERRUPT | + GT_CONTEXT_SWITCH_INTERRUPT | + GT_WAIT_SEMAPHORE_INTERRUPT; } - ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE); - bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY); - - dmask = irqs << 16 | irqs; - smask = irqs << 16; + rcs_mask |= common_mask; + bcs_mask |= common_mask; + vcs_mask |= common_mask; + vecs_mask |= common_mask; + ccs_mask |= common_mask; if (xe_gt_is_main_type(gt)) { + /* + * For enabling the interrupts, the information about fused off + * engines doesn't matter much, but this also allows to check if + * the engine is available architecturally in the platform + */ + u32 ccs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE); + u32 bcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY); + /* Enable interrupts for each engine class */ - xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask); - if (ccs_mask) - xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask); + xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, + REG_FIELD_PREP(ENGINE1_MASK, rcs_mask) | + REG_FIELD_PREP(ENGINE0_MASK, bcs_mask)); + if (ccs_fuse_mask) + xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, + REG_FIELD_PREP(ENGINE1_MASK, ccs_mask)); /* Unmask interrupts for each engine instance */ - xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask); - xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask); - if (bcs_mask & (BIT(1)|BIT(2))) - xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask); - if (bcs_mask & (BIT(3)|BIT(4))) - xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask); - if (bcs_mask & (BIT(5)|BIT(6))) - xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask); - if (bcs_mask & (BIT(7)|BIT(8))) - xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask); - if (ccs_mask & (BIT(0)|BIT(1))) - xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask); - if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask); + val = ~REG_FIELD_PREP(ENGINE1_MASK, rcs_mask); + xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, val); + val = ~REG_FIELD_PREP(ENGINE1_MASK, bcs_mask); + xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, val); + + val = ~(REG_FIELD_PREP(ENGINE1_MASK, bcs_mask) | + REG_FIELD_PREP(ENGINE0_MASK, bcs_mask)); + if (bcs_fuse_mask & (BIT(1)|BIT(2))) + xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, val); + if (bcs_fuse_mask & (BIT(3)|BIT(4))) + xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, val); + if (bcs_fuse_mask & (BIT(5)|BIT(6))) + xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, val); + if (bcs_fuse_mask & (BIT(7)|BIT(8))) + xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, val); + + val = ~(REG_FIELD_PREP(ENGINE1_MASK, ccs_mask) | + REG_FIELD_PREP(ENGINE0_MASK, ccs_mask)); + if (ccs_fuse_mask & (BIT(0)|BIT(1))) + xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, val); + if (ccs_fuse_mask & (BIT(2)|BIT(3))) + xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, val); } if 
(xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { + u32 vcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE); + u32 vecs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE); + u32 other_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER); + /* Enable interrupts for each engine class */ - xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask); + xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, + REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) | + REG_FIELD_PREP(ENGINE0_MASK, vecs_mask)); /* Unmask interrupts for each engine instance */ - xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask); - xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask); - xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask); + val = ~(REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) | + REG_FIELD_PREP(ENGINE0_MASK, vcs_mask)); + if (vcs_fuse_mask & (BIT(0) | BIT(1))) + xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, val); + if (vcs_fuse_mask & (BIT(2) | BIT(3))) + xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, val); + if (vcs_fuse_mask & (BIT(4) | BIT(5))) + xe_mmio_write32(mmio, VCS4_VCS5_INTR_MASK, val); + if (vcs_fuse_mask & (BIT(6) | BIT(7))) + xe_mmio_write32(mmio, VCS6_VCS7_INTR_MASK, val); + + val = ~(REG_FIELD_PREP(ENGINE1_MASK, vecs_mask) | + REG_FIELD_PREP(ENGINE0_MASK, vecs_mask)); + if (vecs_fuse_mask & (BIT(0) | BIT(1))) + xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, val); + if (vecs_fuse_mask & (BIT(2) | BIT(3))) + xe_mmio_write32(mmio, VECS2_VECS3_INTR_MASK, val); /* * the heci2 interrupt is enabled via the same register as the * GSCCS interrupts, but it has its own mask register. */ - if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) { - gsc_mask = irqs | GSC_ER_COMPLETE; + if (other_fuse_mask) { + gsc_mask = common_mask | GSC_ER_COMPLETE; heci_mask = GSC_IRQ_INTF(1); } else if (xe->info.has_heci_gscfi) { gsc_mask = GSC_IRQ_INTF(1); @@ -494,11 +538,15 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) static void gt_irq_reset(struct xe_tile *tile) { struct xe_mmio *mmio = &tile->mmio; - - u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, - XE_ENGINE_CLASS_COMPUTE); - u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, - XE_ENGINE_CLASS_COPY); + u32 ccs_mask = ~0; + u32 bcs_mask = ~0; + + if (tile->primary_gt) { + ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, + XE_ENGINE_CLASS_COMPUTE); + bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, + XE_ENGINE_CLASS_COPY); + } /* Disable RCS, BCS, VCS and VECS class engines. 
*/ xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0); @@ -616,6 +664,7 @@ static void xe_irq_reset(struct xe_device *xe) tile = xe_device_get_root_tile(xe); mask_and_disable(tile, GU_MISC_IRQ_OFFSET); xe_display_irq_reset(xe); + xe_i2c_irq_reset(xe); /* * The tile's top-level status register should be the last one @@ -656,7 +705,8 @@ static void xe_irq_postinstall(struct xe_device *xe) xe_memirq_postinstall(&tile->memirq); } - xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe)); + xe_display_irq_postinstall(xe); + xe_i2c_irq_postinstall(xe); /* * ASLE backlight operations are reported via GUnit GSE interrupts @@ -847,22 +897,6 @@ static int xe_irq_msix_init(struct xe_device *xe) return 0; } -static irqreturn_t guc2host_irq_handler(int irq, void *arg) -{ - struct xe_device *xe = arg; - struct xe_tile *tile; - u8 id; - - if (!atomic_read(&xe->irq.enabled)) - return IRQ_NONE; - - for_each_tile(tile, xe, id) - xe_guc_irq_handler(&tile->primary_gt->uc.guc, - GUC_INTR_GUC2HOST); - - return IRQ_HANDLED; -} - static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg) { unsigned int tile_id, gt_id; @@ -979,7 +1013,7 @@ int xe_irq_msix_request_irqs(struct xe_device *xe) u16 msix; msix = GUC2HOST_MSIX; - err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe, + err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe, DRIVER_NAME "-guc2host", false, &msix); if (err) return err; diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 62fc5a1a332d..4dc1de482eee 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -17,7 +17,7 @@ #include "xe_mmio.h" #include "xe_res_cursor.h" #include "xe_sriov.h" -#include "xe_sriov_printk.h" +#include "xe_tile_sriov_printk.h" /** * DOC: Local Memory Translation Table @@ -32,7 +32,7 @@ */ #define lmtt_assert(lmtt, condition) xe_tile_assert(lmtt_to_tile(lmtt), condition) -#define lmtt_debug(lmtt, msg...) xe_sriov_dbg_verbose(lmtt_to_xe(lmtt), "LMTT: " msg) +#define lmtt_debug(lmtt, msg...) 
xe_tile_sriov_dbg_verbose(lmtt_to_tile(lmtt), "LMTT: " msg) static bool xe_has_multi_level_lmtt(struct xe_device *xe) { @@ -267,15 +267,14 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt) */ void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt) { - struct xe_device *xe = lmtt_to_xe(lmtt); int err; - lmtt_assert(lmtt, IS_SRIOV_PF(xe)); + lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt))); err = lmtt_invalidate_hw(lmtt); if (err) - xe_sriov_warn(xe, "LMTT%u invalidation failed (%pe)", - lmtt_to_tile(lmtt)->id, ERR_PTR(err)); + xe_tile_sriov_err(lmtt_to_tile(lmtt), "LMTT invalidation failed (%pe)", + ERR_PTR(err)); } static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt, diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 47e9df775072..b5083c99dd50 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -1214,8 +1214,7 @@ static int setup_bo(struct bo_setup_state *state) ssize_t remain; if (state->lrc->bo->vmap.is_iomem) { - if (!state->buffer) - return -ENOMEM; + xe_gt_assert(state->hwe->gt, state->buffer); state->ptr = state->buffer; } else { state->ptr = state->lrc->bo->vmap.vaddr + state->offset; @@ -1248,7 +1247,7 @@ fail: static void finish_bo(struct bo_setup_state *state) { - if (!state->buffer) + if (!state->lrc->bo->vmap.is_iomem) return; xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap, @@ -1303,8 +1302,11 @@ static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe) u32 *buf = NULL; int ret; - if (lrc->bo->vmap.is_iomem) + if (lrc->bo->vmap.is_iomem) { buf = kmalloc(LRC_WA_BB_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } ret = xe_lrc_setup_wa_bb_with_scratch(lrc, hwe, buf); @@ -1347,8 +1349,11 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe) if (xe_gt_WARN_ON(lrc->gt, !state.funcs)) return 0; - if (lrc->bo->vmap.is_iomem) + if (lrc->bo->vmap.is_iomem) { state.buffer = kmalloc(state.max_size, GFP_KERNEL); + if (!state.buffer) + return -ENOMEM; + } ret = setup_bo(&state); if (ret) { @@ -1412,8 +1417,9 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE; - if (vm && vm->xef) /* userspace */ - bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE; + + if ((vm && vm->xef) || init_flags & XE_LRC_CREATE_USER_CTX) /* userspace */ + bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM; lrc->bo = xe_bo_create_pin_map_novm(xe, tile, bo_size, diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h index 188565465779..2fb628da5c43 100644 --- a/drivers/gpu/drm/xe/xe_lrc.h +++ b/drivers/gpu/drm/xe/xe_lrc.h @@ -44,8 +44,10 @@ struct xe_lrc_snapshot { #define LRC_WA_BB_SIZE SZ_4K -#define XE_LRC_CREATE_RUNALONE 0x1 -#define XE_LRC_CREATE_PXP 0x2 +#define XE_LRC_CREATE_RUNALONE BIT(0) +#define XE_LRC_CREATE_PXP BIT(1) +#define XE_LRC_CREATE_USER_CTX BIT(2) + struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm, u32 ring_size, u16 msix_vec, u32 flags); void xe_lrc_destroy(struct kref *ref); @@ -74,6 +76,16 @@ static inline void xe_lrc_put(struct xe_lrc *lrc) kref_put(&lrc->refcount, xe_lrc_destroy); } +/** + * xe_lrc_ring_size() - Xe LRC ring size + * + * Return: Size of LRC ring buffer + */ +static inline size_t xe_lrc_ring_size(void) +{ + return SZ_16K; +} + size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class); u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc); u32 xe_lrc_regs_offset(struct xe_lrc *lrc); diff --git 
a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h index 8d67f6ba2d95..c44777125691 100644 --- a/drivers/gpu/drm/xe/xe_map.h +++ b/drivers/gpu/drm/xe/xe_map.h @@ -14,9 +14,9 @@ * DOC: Map layer * * All access to any memory shared with a device (both sysmem and vram) in the - * XE driver should go through this layer (xe_map). This layer is built on top + * Xe driver should go through this layer (xe_map). This layer is built on top * of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory` - * and with extra hooks into the XE driver that allows adding asserts to memory + * and with extra hooks into the Xe driver that allows adding asserts to memory * accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics). */ @@ -78,24 +78,6 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map, iosys_map_wr(map__, offset__, type__, val__); \ }) -#define xe_map_rd_array(xe__, map__, index__, type__) \ - xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__) - -#define xe_map_wr_array(xe__, map__, index__, type__, val__) \ - xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__) - -#define xe_map_rd_array_u32(xe__, map__, index__) \ - xe_map_rd_array(xe__, map__, index__, u32) - -#define xe_map_wr_array_u32(xe__, map__, index__, val__) \ - xe_map_wr_array(xe__, map__, index__, u32, val__) - -#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \ - xe_map_rd_array_u32(xe__, map__, (index__) % (size__)) - -#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \ - xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__) - #define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \ struct xe_device *__xe = xe__; \ xe_device_assert_mem_access(__xe); \ diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c index 49c45ec3e83c..b0c7ce0a5d1e 100644 --- a/drivers/gpu/drm/xe/xe_memirq.c +++ b/drivers/gpu/drm/xe/xe_memirq.c @@ -14,16 +14,15 @@ #include "xe_device.h" #include "xe_device_types.h" #include "xe_gt.h" -#include "xe_gt_printk.h" #include "xe_guc.h" #include "xe_hw_engine.h" #include "xe_map.h" #include "xe_memirq.h" +#include "xe_tile_printk.h" #define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition) #define memirq_printk(m, _level, _fmt, ...) \ - drm_##_level(&memirq_to_xe(m)->drm, "MEMIRQ%u: " _fmt, \ - memirq_to_tile(m)->id, ##__VA_ARGS__) + xe_tile_##_level(memirq_to_tile(m), "MEMIRQ: " _fmt, ##__VA_ARGS__) #ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ #define memirq_debug(m, _fmt, ...) 
memirq_printk(m, dbg, _fmt, ##__VA_ARGS__) @@ -398,8 +397,9 @@ void xe_memirq_postinstall(struct xe_memirq *memirq) memirq_set_enable(memirq, true); } -static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector, - u16 offset, const char *name) +static bool __memirq_received(struct xe_memirq *memirq, + struct iosys_map *vector, u16 offset, + const char *name, bool clear) { u8 value; @@ -409,19 +409,33 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector, memirq_err_ratelimited(memirq, "Unexpected memirq value %#x from %s at %u\n", value, name, offset); - iosys_map_wr(vector, offset, u8, 0x00); + if (clear) + iosys_map_wr(vector, offset, u8, 0x00); } return value; } +static bool memirq_received_noclear(struct xe_memirq *memirq, + struct iosys_map *vector, + u16 offset, const char *name) +{ + return __memirq_received(memirq, vector, offset, name, false); +} + +static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector, + u16 offset, const char *name) +{ + return __memirq_received(memirq, vector, offset, name, true); +} + static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status, struct xe_hw_engine *hwe) { memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr); - if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name)) - xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT); + if (memirq_received(memirq, status, ilog2(GT_MI_USER_INTERRUPT), hwe->name)) + xe_hw_engine_handle_irq(hwe, GT_MI_USER_INTERRUPT); } static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status, @@ -434,8 +448,16 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name)) xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST); - if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name)) + /* + * This is a software interrupt that must be cleared after it's consumed + * to avoid race conditions where xe_gt_sriov_vf_recovery_pending() + * returns false. + */ + if (memirq_received_noclear(memirq, status, ilog2(GUC_INTR_SW_INT_0), + name)) { xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0); + iosys_map_wr(status, ilog2(GUC_INTR_SW_INT_0), u8, 0x00); + } } /** @@ -461,6 +483,23 @@ void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe) } /** + * xe_memirq_guc_sw_int_0_irq_pending() - SW_INT_0 IRQ is pending + * @memirq: the &xe_memirq + * @guc: the &xe_guc to check for IRQ + * + * Return: True if SW_INT_0 IRQ is pending on @guc, False otherwise + */ +bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc) +{ + struct xe_gt *gt = guc_to_gt(guc); + u32 offset = xe_gt_is_media_type(gt) ? ilog2(INTR_MGUC) : ilog2(INTR_GUC); + struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&memirq->status, offset * SZ_16); + + return memirq_received_noclear(memirq, &map, ilog2(GUC_INTR_SW_INT_0), + guc_name(guc)); +} + +/** * xe_memirq_handler - The `Memory Based Interrupts`_ Handler. 
* @memirq: the &xe_memirq * diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h index 06130650e9d6..e25d2234ab87 100644 --- a/drivers/gpu/drm/xe/xe_memirq.h +++ b/drivers/gpu/drm/xe/xe_memirq.h @@ -25,4 +25,6 @@ void xe_memirq_handler(struct xe_memirq *memirq); int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc); +bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc); + #endif diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index a36ce7dce8cc..2184af413b91 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -29,6 +29,7 @@ #include "xe_lrc.h" #include "xe_map.h" #include "xe_mocs.h" +#include "xe_printk.h" #include "xe_pt.h" #include "xe_res_cursor.h" #include "xe_sa.h" @@ -57,6 +58,13 @@ struct xe_migrate { u64 usm_batch_base_ofs; /** @cleared_mem_ofs: VM offset of @cleared_bo. */ u64 cleared_mem_ofs; + /** @large_page_copy_ofs: VM offset of 2M pages used for large copies */ + u64 large_page_copy_ofs; + /** + * @large_page_copy_pdes: BO offset to writeout 2M pages (PDEs) used for + * large copies + */ + u64 large_page_copy_pdes; /** * @fence: dma-fence representing the last migration job batch. * Protected by @job_mutex. @@ -288,6 +296,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, (i + 1) * 8, u64, entry); } + /* Reserve 2M PDEs */ + level = 1; + m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level); + m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level + + NUM_PT_SLOTS * 8; + /* Set up a 1GiB NULL mapping at 255GiB offset. */ level = 2; xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64, @@ -686,9 +700,9 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb, } #define EMIT_COPY_DW 10 -static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, - u64 src_ofs, u64 dst_ofs, unsigned int size, - unsigned int pitch) +static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, + u64 dst_ofs, unsigned int size, + unsigned int pitch) { struct xe_device *xe = gt_to_xe(gt); u32 mocs = 0; @@ -717,6 +731,61 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, bb->cs[bb->len++] = upper_32_bits(src_ofs); } +#define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */ +static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, + u64 dst_ofs, unsigned int size, unsigned int pitch) +{ + u32 mode, copy_type, width; + + xe_gt_assert(gt, IS_ALIGNED(size, pitch)); + xe_gt_assert(gt, pitch <= U16_MAX); + xe_gt_assert(gt, pitch); + xe_gt_assert(gt, size); + + if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) && + IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) && + IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) { + mode = MEM_COPY_PAGE_COPY_MODE; + copy_type = 0; /* linear copy */ + width = size / PAGE_COPY_MODE_PS; + } else if (pitch > 1) { + xe_gt_assert(gt, size / pitch <= U16_MAX); + mode = 0; /* BYTE_COPY */ + copy_type = MEM_COPY_MATRIX_COPY; + width = pitch; + } else { + mode = 0; /* BYTE_COPY */ + copy_type = 0; /* linear copy */ + width = size; + } + + xe_gt_assert(gt, width <= U16_MAX); + + bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type; + bb->cs[bb->len++] = width - 1; + bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */ + bb->cs[bb->len++] = pitch - 1; + bb->cs[bb->len++] = pitch - 1; + bb->cs[bb->len++] = lower_32_bits(src_ofs); + bb->cs[bb->len++] = upper_32_bits(src_ofs); + 
bb->cs[bb->len++] = lower_32_bits(dst_ofs); + bb->cs[bb->len++] = upper_32_bits(dst_ofs); + bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) | + FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index); +} + +static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, + u64 src_ofs, u64 dst_ofs, unsigned int size, + unsigned int pitch) +{ + struct xe_device *xe = gt_to_xe(gt); + + if (xe->info.has_mem_copy_instr) + emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch); + else + emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch); +} + static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm) { return usm ? m->usm_batch_base_ofs : m->batch_base_ofs; @@ -834,7 +903,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, &ccs_it); while (size) { - u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */ + u32 batch_size = 1; /* MI_BATCH_BUFFER_END */ struct xe_sched_job *job; struct xe_bb *bb; u32 flush_flags = 0; @@ -980,15 +1049,27 @@ struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate) return migrate->q->lrc[0]; } -static int emit_flush_invalidate(struct xe_exec_queue *q, u32 *dw, int i, - u32 flags) +static u64 migrate_vm_ppgtt_addr_tlb_inval(void) +{ + /* + * The migrate VM is self-referential so it can modify its own PTEs (see + * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE + * entries for kernel operations (copies, clears, CCS migrate), and + * suballocate the rest to user operations (binds/unbinds). With + * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates, + * so assign NUM_KERNEL_PDE - 2 for TLB invalidation. + */ + return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE; +} + +static int emit_flush_invalidate(u32 *dw, int i, u32 flags) { - struct xe_lrc *lrc = xe_exec_queue_lrc(q); + u64 addr = migrate_vm_ppgtt_addr_tlb_inval(); + dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW | flags; - dw[i++] = lower_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc)) | - MI_FLUSH_DW_USE_GTT; - dw[i++] = upper_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc)); + dw[i++] = lower_32_bits(addr); + dw[i++] = upper_32_bits(addr); dw[i++] = MI_NOOP; dw[i++] = MI_NOOP; @@ -1101,11 +1182,11 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); - bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags); + bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags); flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt, src_L0_ofs, dst_is_pltt, src_L0, ccs_ofs, true); - bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags); + bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags); size -= src_L0; } @@ -1130,6 +1211,128 @@ struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate) return migrate->q; } +/** + * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object. + * @vram_bo: The VRAM buffer object. + * @vram_offset: The VRAM offset. + * @sysmem_bo: The sysmem buffer object. + * @sysmem_offset: The sysmem offset. + * @size: The size of VRAM chunk to copy. + * @dir: The direction of the copy operation. + * + * Copies a portion of a buffer object between VRAM and system memory. + * On Xe2 platforms that support flat CCS, VRAM data is decompressed when + * copying to system memory. + * + * Return: Pointer to a dma_fence representing the last copy batch, or + * an error pointer on failure. 
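A minimal caller sketch for the new xe_migrate_vram_copy_chunk() helper documented above, assuming an already populated VRAM BO and sysmem BO whose reservations the caller holds (BO names and the 2 MiB size are illustrative, not part of the patch):

    /* Illustrative only: evict the first 2 MiB of a VRAM BO into a sysmem BO. */
    struct dma_fence *fence;

    fence = xe_migrate_vram_copy_chunk(vram_bo, 0, sysmem_bo, 0, SZ_2M,
                                       XE_MIGRATE_COPY_TO_SRAM);
    if (IS_ERR(fence))
            return PTR_ERR(fence);

    /* The fence signals once the last copy batch for this chunk completes. */
    dma_fence_wait(fence, false);
    dma_fence_put(fence);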
If there is a failure, any copy operation + * started by the function call has been synced. + */ +struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset, + struct xe_bo *sysmem_bo, u64 sysmem_offset, + u64 size, enum xe_migrate_copy_dir dir) +{ + struct xe_device *xe = xe_bo_device(vram_bo); + struct xe_tile *tile = vram_bo->tile; + struct xe_gt *gt = tile->primary_gt; + struct xe_migrate *m = tile->migrate; + struct dma_fence *fence = NULL; + struct ttm_resource *vram = vram_bo->ttm.resource; + struct ttm_resource *sysmem = sysmem_bo->ttm.resource; + struct xe_res_cursor vram_it, sysmem_it; + u64 vram_L0_ofs, sysmem_L0_ofs; + u32 vram_L0_pt, sysmem_L0_pt; + u64 vram_L0, sysmem_L0; + bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM); + bool use_comp_pat = to_sysmem && + GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe); + int pass = 0; + int err; + + xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE)); + xe_assert(xe, xe_bo_is_vram(vram_bo)); + xe_assert(xe, !xe_bo_is_vram(sysmem_bo)); + xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size)); + xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size)); + + xe_res_first(vram, vram_offset, size, &vram_it); + xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it); + + while (size) { + u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM; + u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */ + struct xe_sched_job *job; + struct xe_bb *bb; + u32 update_idx; + bool usm = xe->info.has_usm; + u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; + + sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it); + vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0); + + xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0); + + pte_flags |= use_comp_pat ? 
PTE_UPDATE_FLAG_IS_COMP_PTE : 0; + batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0, + &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts); + + batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs, + &sysmem_L0_pt, 0, avail_pts, avail_pts); + batch_size += EMIT_COPY_DW; + + bb = xe_bb_new(gt, batch_size, usm); + if (IS_ERR(bb)) { + err = PTR_ERR(bb); + return ERR_PTR(err); + } + + if (xe_migrate_allow_identity(vram_L0, &vram_it)) + xe_res_next(&vram_it, vram_L0); + else + emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram); + + emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem); + + bb->cs[bb->len++] = MI_BATCH_BUFFER_END; + update_idx = bb->len; + + if (to_sysmem) + emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE); + else + emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE); + + job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm), + update_idx); + if (IS_ERR(job)) { + xe_bb_free(bb, NULL); + err = PTR_ERR(job); + return ERR_PTR(err); + } + + xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB); + + xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv, + DMA_RESV_USAGE_BOOKKEEP)); + xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv, + DMA_RESV_USAGE_BOOKKEEP)); + + scoped_guard(mutex, &m->job_mutex) { + xe_sched_job_arm(job); + dma_fence_put(fence); + fence = dma_fence_get(&job->drm.s_fence->finished); + xe_sched_job_push(job); + + dma_fence_put(m->fence); + m->fence = dma_fence_get(fence); + } + + xe_bb_free(bb, fence); + size -= vram_L0; + } + + return fence; +} + static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, u32 size, u32 pitch) { @@ -1287,7 +1490,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, /* Calculate final sizes and batch size.. */ pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; - batch_size = 2 + + batch_size = 1 + pte_update_size(m, pte_flags, src, &src_it, &clear_L0, &clear_L0_ofs, &clear_L0_pt, clear_bo_data ? 
emit_clear_cmd_len(gt) : 0, 0, @@ -1766,16 +1969,22 @@ static u32 pte_update_cmd_size(u64 size) static void build_pt_update_batch_sram(struct xe_migrate *m, struct xe_bb *bb, u32 pt_offset, struct drm_pagemap_addr *sram_addr, - u32 size) + u32 size, int level) { u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB]; + u64 gpu_page_size = 0x1ull << xe_pt_shift(level); u32 ptes; int i = 0; - ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE); + xe_tile_assert(m->tile, PAGE_ALIGNED(size)); + + ptes = DIV_ROUND_UP(size, gpu_page_size); while (ptes) { u32 chunk = min(MAX_PTE_PER_SDI, ptes); + if (!level) + chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE); + bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); bb->cs[bb->len++] = pt_offset; bb->cs[bb->len++] = 0; @@ -1784,30 +1993,70 @@ static void build_pt_update_batch_sram(struct xe_migrate *m, ptes -= chunk; while (chunk--) { - u64 addr = sram_addr[i].addr & PAGE_MASK; + u64 addr = sram_addr[i].addr; + u64 pte; xe_tile_assert(m->tile, sram_addr[i].proto == DRM_INTERCONNECT_SYSTEM); xe_tile_assert(m->tile, addr); - addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe, - addr, pat_index, - 0, false, 0); - bb->cs[bb->len++] = lower_32_bits(addr); - bb->cs[bb->len++] = upper_32_bits(addr); - - i++; + xe_tile_assert(m->tile, PAGE_ALIGNED(addr)); + +again: + pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe, + addr, pat_index, + level, false, 0); + bb->cs[bb->len++] = lower_32_bits(pte); + bb->cs[bb->len++] = upper_32_bits(pte); + + if (gpu_page_size < PAGE_SIZE) { + addr += XE_PAGE_SIZE; + if (!PAGE_ALIGNED(addr)) { + chunk--; + goto again; + } + i++; + } else { + i += gpu_page_size / PAGE_SIZE; + } } } } -enum xe_migrate_copy_dir { - XE_MIGRATE_COPY_TO_VRAM, - XE_MIGRATE_COPY_TO_SRAM, -}; +static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr, + unsigned long size) +{ + u32 large_size = (0x1 << xe_pt_shift(1)); + unsigned long i, incr = large_size / PAGE_SIZE; + + for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr) + if (PAGE_SIZE << sram_addr[i].order != large_size) + return false; + + return true; +} #define XE_CACHELINE_BYTES 64ull #define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1) +static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len) +{ + u32 pitch; + + if (IS_ALIGNED(len, PAGE_SIZE)) + pitch = PAGE_SIZE; + else if (IS_ALIGNED(len, SZ_4K)) + pitch = SZ_4K; + else if (IS_ALIGNED(len, SZ_256)) + pitch = SZ_256; + else if (IS_ALIGNED(len, 4)) + pitch = 4; + else + pitch = 1; + + xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr); + return pitch; +} + static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, unsigned long len, unsigned long sram_offset, @@ -1819,24 +2068,25 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, struct xe_device *xe = gt_to_xe(gt); bool use_usm_batch = xe->info.has_usm; struct dma_fence *fence = NULL; - u32 batch_size = 2; + u32 batch_size = 1; u64 src_L0_ofs, dst_L0_ofs; struct xe_sched_job *job; struct xe_bb *bb; u32 update_idx, pt_slot = 0; unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE); - unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ? 
- PAGE_SIZE : 4; + unsigned int pitch = xe_migrate_copy_pitch(xe, len); int err; unsigned long i, j; + bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset); - if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) || - (sram_offset | vram_addr) & XE_CACHELINE_MASK)) + if (!xe->info.has_mem_copy_instr && + drm_WARN_ON(&xe->drm, + (!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK)) return ERR_PTR(-EOPNOTSUPP); xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER); - batch_size += pte_update_cmd_size(len); + batch_size += pte_update_cmd_size(npages << PAGE_SHIFT); batch_size += EMIT_COPY_DW; bb = xe_bb_new(gt, batch_size, use_usm_batch); @@ -1853,7 +2103,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, * struct drm_pagemap_addr. Ensure this is the case even with higher * orders. */ - for (i = 0; i < npages;) { + for (i = 0; !use_pde && i < npages;) { unsigned int order = sram_addr[i].order; for (j = 1; j < NR_PAGES(order) && i + j < npages; j++) @@ -1863,16 +2113,26 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, i += NR_PAGES(order); } - build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE, - sram_addr, len + sram_offset); + if (use_pde) + build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes, + sram_addr, npages << PAGE_SHIFT, 1); + else + build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE, + sram_addr, npages << PAGE_SHIFT, 0); if (dir == XE_MIGRATE_COPY_TO_VRAM) { - src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset; + if (use_pde) + src_L0_ofs = m->large_page_copy_ofs + sram_offset; + else + src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset; dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false); } else { src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false); - dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset; + if (use_pde) + dst_L0_ofs = m->large_page_copy_ofs + sram_offset; + else + dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset; } bb->cs[bb->len++] = MI_BATCH_BUFFER_END; @@ -1918,7 +2178,7 @@ err: * * Copy from an array dma addresses to a VRAM device physical address * - * Return: dma fence for migrate to signal completion on succees, ERR_PTR on + * Return: dma fence for migrate to signal completion on success, ERR_PTR on * failure */ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m, @@ -1939,7 +2199,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m, * * Copy from a VRAM device physical address to an array dma addresses * - * Return: dma fence for migrate to signal completion on succees, ERR_PTR on + * Return: dma fence for migrate to signal completion on success, ERR_PTR on * failure */ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m, @@ -2040,8 +2300,10 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, xe_bo_assert_held(bo); /* Use bounce buffer for small access and unaligned access */ - if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) || - !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) { + if (!xe->info.has_mem_copy_instr && + (!IS_ALIGNED(len, 4) || + !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) || + !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) { int buf_offset = 0; void *bounce; int err; @@ -2103,6 +2365,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) + cursor.start; int current_bytes; + u32 pitch; if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER) current_bytes = min_t(int, bytes_left, @@ -2110,13 +2373,13 
@@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, else current_bytes = min_t(int, bytes_left, cursor.size); - if (current_bytes & ~PAGE_MASK) { - int pitch = 4; - + pitch = xe_migrate_copy_pitch(xe, current_bytes); + if (xe->info.has_mem_copy_instr) + current_bytes = min_t(int, current_bytes, U16_MAX * pitch); + else current_bytes = min_t(int, current_bytes, round_down(S16_MAX * pitch, XE_CACHELINE_BYTES)); - } __fence = xe_migrate_vram(m, current_bytes, (unsigned long)buf & ~PAGE_MASK, @@ -2188,6 +2451,20 @@ void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q) xe_vm_assert_held(q->vm); /* User queues VM's should be locked */ } +#if IS_ENABLED(CONFIG_PROVE_LOCKING) +/** + * xe_migrate_job_lock_assert() - Assert migrate job lock held of queue + * @q: Migrate queue + */ +void xe_migrate_job_lock_assert(struct xe_exec_queue *q) +{ + struct xe_migrate *m = gt_to_tile(q->gt)->migrate; + + xe_gt_assert(q->gt, q == m->q); + lockdep_assert_held(&m->job_mutex); +} +#endif + #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) #include "tests/xe_migrate.c" #endif diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h index 4fad324b6253..260e298e5dd7 100644 --- a/drivers/gpu/drm/xe/xe_migrate.h +++ b/drivers/gpu/drm/xe/xe_migrate.h @@ -28,6 +28,11 @@ struct xe_vma; enum xe_sriov_vf_ccs_rw_ctxs; +enum xe_migrate_copy_dir { + XE_MIGRATE_COPY_TO_VRAM, + XE_MIGRATE_COPY_TO_SRAM, +}; + /** * struct xe_migrate_pt_update_ops - Callbacks for the * xe_migrate_update_pgtables() function. @@ -131,6 +136,9 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate); struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate); +struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset, + struct xe_bo *sysmem_bo, u64 sysmem_offset, + u64 size, enum xe_migrate_copy_dir dir); int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, unsigned long offset, void *buf, int len, int write); @@ -152,6 +160,14 @@ xe_migrate_update_pgtables(struct xe_migrate *m, void xe_migrate_wait(struct xe_migrate *m); +#if IS_ENABLED(CONFIG_PROVE_LOCKING) +void xe_migrate_job_lock_assert(struct xe_exec_queue *q); +#else +static inline void xe_migrate_job_lock_assert(struct xe_exec_queue *q) +{ +} +#endif + void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q); void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q); diff --git a/drivers/gpu/drm/xe/xe_migrate_doc.h b/drivers/gpu/drm/xe/xe_migrate_doc.h index 63c7d67b5b62..c082bc0b7068 100644 --- a/drivers/gpu/drm/xe/xe_migrate_doc.h +++ b/drivers/gpu/drm/xe/xe_migrate_doc.h @@ -9,7 +9,7 @@ /** * DOC: Migrate Layer * - * The XE migrate layer is used generate jobs which can copy memory (eviction), + * The Xe migrate layer is used generate jobs which can copy memory (eviction), * clear memory, or program tables (binds). This layer exists in every GT, has * a migrate engine, and uses a special VM for all generated jobs. 
* diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index ef6f3ea573a2..350dca1f0925 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -379,3 +379,32 @@ int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 va { return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false); } + +#ifdef CONFIG_PCI_IOV +static size_t vf_regs_stride(struct xe_device *xe) +{ + return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000; +} + +/** + * xe_mmio_init_vf_view() - Initialize an MMIO instance for accesses like the VF + * @mmio: the target &xe_mmio to initialize as VF's view + * @base: the source &xe_mmio to initialize from + * @vfid: the VF identifier + */ +void xe_mmio_init_vf_view(struct xe_mmio *mmio, const struct xe_mmio *base, unsigned int vfid) +{ + struct xe_tile *tile = base->tile; + struct xe_device *xe = tile->xe; + size_t offset = vf_regs_stride(xe) * vfid; + + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid); + xe_assert(xe, !base->sriov_vf_gt); + xe_assert(xe, base->regs_size > offset); + + *mmio = *base; + mmio->regs += offset; + mmio->regs_size -= offset; +} +#endif diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h index c151ba569003..15362789ab99 100644 --- a/drivers/gpu/drm/xe/xe_mmio.h +++ b/drivers/gpu/drm/xe/xe_mmio.h @@ -42,4 +42,8 @@ static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe) return &xe->tiles[0].mmio; } +#ifdef CONFIG_PCI_IOV +void xe_mmio_init_vf_view(struct xe_mmio *mmio, const struct xe_mmio *base, unsigned int vfid); +#endif + #endif diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c index 0c737413fcb6..6613d3b48a84 100644 --- a/drivers/gpu/drm/xe/xe_mocs.c +++ b/drivers/gpu/drm/xe/xe_mocs.c @@ -568,6 +568,23 @@ static const struct xe_mocs_ops xe2_mocs_ops = { .dump = xe2_mocs_dump, }; +/* + * Note that the "L3" and "L4" register fields actually control the L2 and L3 + * caches respectively on this platform. + */ +static const struct xe_mocs_entry xe3p_xpc_mocs_table[] = { + /* Defer to PAT */ + MOCS_ENTRY(0, XE2_L3_0_WB | L4_3_UC, 0), + /* UC */ + MOCS_ENTRY(1, IG_PAT | XE2_L3_3_UC | L4_3_UC, 0), + /* L2 */ + MOCS_ENTRY(2, IG_PAT | XE2_L3_0_WB | L4_3_UC, 0), + /* L3 */ + MOCS_ENTRY(3, IG_PAT | XE2_L3_3_UC | L4_0_WB, 0), + /* L2 + L3 */ + MOCS_ENTRY(4, IG_PAT | XE2_L3_0_WB | L4_0_WB, 0), +}; + static unsigned int get_mocs_settings(struct xe_device *xe, struct xe_mocs_info *info) { @@ -576,6 +593,16 @@ static unsigned int get_mocs_settings(struct xe_device *xe, memset(info, 0, sizeof(struct xe_mocs_info)); switch (xe->info.platform) { + case XE_CRESCENTISLAND: + info->ops = &xe2_mocs_ops; + info->table_size = ARRAY_SIZE(xe3p_xpc_mocs_table); + info->table = xe3p_xpc_mocs_table; + info->num_mocs_regs = XE2_NUM_MOCS_ENTRIES; + info->uc_index = 1; + info->wb_index = 4; + info->unused_entries_index = 4; + break; + case XE_NOVALAKE_S: case XE_PANTHERLAKE: case XE_LUNARLAKE: case XE_BATTLEMAGE: @@ -772,12 +799,20 @@ void xe_mocs_init(struct xe_gt *gt) init_l3cc_table(gt, &table); } -void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) +/** + * xe_mocs_dump() - Dump MOCS table. + * @gt: the &xe_gt with MOCS table + * @p: the &drm_printer to dump info to + * + * Return: 0 on success or a negative error code on failure. 
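A short worked example of the PF-side VF register view set up by xe_mmio_init_vf_view() above; the stride value and VF number are illustrative:

    /*
     * Illustrative only: on a platform where GRAPHICS_VERx100 > 1200 the
     * per-VF register stride is 0x400 bytes, so VF3's registers start
     * 3 * 0x400 = 0xC00 bytes into the PF's register mapping.
     */
    struct xe_mmio vf3_view;

    xe_mmio_init_vf_view(&vf3_view, xe_root_tile_mmio(xe), 3);
    /* vf3_view.regs = base regs + 0xC00; vf3_view.regs_size shrinks by 0xC00. */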
+ */ +int xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); enum xe_force_wake_domains domain; struct xe_mocs_info table; unsigned int fw_ref, flags; + int err = 0; flags = get_mocs_settings(xe, &table); @@ -785,14 +820,17 @@ void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) xe_pm_runtime_get_noresume(xe); fw_ref = xe_force_wake_get(gt_to_fw(gt), domain); - if (!xe_force_wake_ref_has_domain(fw_ref, domain)) + if (!xe_force_wake_ref_has_domain(fw_ref, domain)) { + err = -ETIMEDOUT; goto err_fw; + } table.ops->dump(&table, flags, gt, p); err_fw: xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); + return err; } #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) diff --git a/drivers/gpu/drm/xe/xe_mocs.h b/drivers/gpu/drm/xe/xe_mocs.h index dc972ffd4d07..f00bbb269829 100644 --- a/drivers/gpu/drm/xe/xe_mocs.h +++ b/drivers/gpu/drm/xe/xe_mocs.h @@ -11,12 +11,6 @@ struct xe_gt; void xe_mocs_init_early(struct xe_gt *gt); void xe_mocs_init(struct xe_gt *gt); - -/** - * xe_mocs_dump - Dump mocs table - * @gt: GT structure - * @p: Printer to dump info to - */ -void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p); +int xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p); #endif diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index a4894eb0d7f3..890c363282ae 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -10,6 +10,7 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_syncobj.h> #include <uapi/drm/xe_drm.h> #include <generated/xe_wa_oob.h> @@ -837,7 +838,8 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) xe_oa_configure_oa_context(stream, false); /* Make sure we disable noa to save power. */ - xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0); + if (GT_VER(stream->gt) < 35) + xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0); sqcnt1 = SQCNT1_PMON_ENABLE | (HAS_OA_BPC_REPORTING(stream->oa->xe) ? 
SQCNT1_OABPC : 0); @@ -868,7 +870,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream) xe_oa_free_oa_buffer(stream); - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), stream->fw_ref); xe_pm_runtime_put(stream->oa->xe); /* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */ @@ -1389,7 +1391,9 @@ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from fro return 0; } -static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param) +static int xe_oa_parse_syncs(struct xe_oa *oa, + struct xe_oa_stream *stream, + struct xe_oa_open_param *param) { int ret, num_syncs, num_ufence = 0; @@ -1409,7 +1413,9 @@ static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param) for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) { ret = xe_sync_entry_parse(oa->xe, param->xef, ¶m->syncs[num_syncs], - ¶m->syncs_user[num_syncs], 0); + ¶m->syncs_user[num_syncs], + stream->ufence_syncobj, + ++stream->ufence_timeline_value, 0); if (ret) goto err_syncs; @@ -1539,7 +1545,7 @@ static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg) return -ENODEV; param.xef = stream->xef; - err = xe_oa_parse_syncs(stream->oa, ¶m); + err = xe_oa_parse_syncs(stream->oa, stream, ¶m); if (err) goto err_config_put; @@ -1635,6 +1641,7 @@ static void xe_oa_destroy_locked(struct xe_oa_stream *stream) if (stream->exec_q) xe_exec_queue_put(stream->exec_q); + drm_syncobj_put(stream->ufence_syncobj); kfree(stream); } @@ -1710,7 +1717,6 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, struct xe_oa_open_param *param) { struct xe_gt *gt = param->hwe->gt; - unsigned int fw_ref; int ret; stream->exec_q = param->exec_q; @@ -1765,8 +1771,8 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, /* Take runtime pm ref and forcewake to disable RC6 */ xe_pm_runtime_get(stream->oa->xe); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + stream->fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(stream->fw_ref, XE_FORCEWAKE_ALL)) { ret = -ETIMEDOUT; goto err_fw_put; } @@ -1811,7 +1817,7 @@ err_put_k_exec_q: err_free_oa_buf: xe_oa_free_oa_buffer(stream); err_fw_put: - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), stream->fw_ref); xe_pm_runtime_put(stream->oa->xe); if (stream->override_gucrc) xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(>->uc.guc.pc)); @@ -1826,6 +1832,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa, struct xe_oa_open_param *param) { struct xe_oa_stream *stream; + struct drm_syncobj *ufence_syncobj; int stream_fd; int ret; @@ -1836,17 +1843,31 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa, goto exit; } + ret = drm_syncobj_create(&ufence_syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, + NULL); + if (ret) + goto exit; + stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (!stream) { ret = -ENOMEM; - goto exit; + goto err_syncobj; } - + stream->ufence_syncobj = ufence_syncobj; stream->oa = oa; - ret = xe_oa_stream_init(stream, param); + + ret = xe_oa_parse_syncs(oa, stream, param); if (ret) goto err_free; + ret = xe_oa_stream_init(stream, param); + if (ret) { + while (param->num_syncs--) + xe_sync_entry_cleanup(¶m->syncs[param->num_syncs]); + kfree(param->syncs); + goto err_free; + } + if (!param->disabled) { ret = xe_oa_enable_locked(stream); if (ret) @@ -1870,6 +1891,8 @@ err_destroy: xe_oa_stream_destroy(stream); 
err_free: kfree(stream); +err_syncobj: + drm_syncobj_put(ufence_syncobj); exit: return ret; } @@ -2083,22 +2106,14 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f goto err_exec_q; } - ret = xe_oa_parse_syncs(oa, ¶m); - if (ret) - goto err_exec_q; - mutex_lock(¶m.hwe->gt->oa.gt_lock); ret = xe_oa_stream_open_ioctl_locked(oa, ¶m); mutex_unlock(¶m.hwe->gt->oa.gt_lock); if (ret < 0) - goto err_sync_cleanup; + goto err_exec_q; return ret; -err_sync_cleanup: - while (param.num_syncs--) - xe_sync_entry_cleanup(¶m.syncs[param.num_syncs]); - kfree(param.syncs); err_exec_q: if (param.exec_q) xe_exec_queue_put(param.exec_q); @@ -2388,11 +2403,13 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi goto sysfs_err; } - mutex_unlock(&oa->metrics_lock); + id = oa_config->id; - drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, oa_config->id); + drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, id); + + mutex_unlock(&oa->metrics_lock); - return oa_config->id; + return id; sysfs_err: mutex_unlock(&oa->metrics_lock); diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h index 2628f78c4e8d..cf080f412189 100644 --- a/drivers/gpu/drm/xe/xe_oa_types.h +++ b/drivers/gpu/drm/xe/xe_oa_types.h @@ -15,6 +15,8 @@ #include "regs/xe_reg_defs.h" #include "xe_hw_engine_types.h" +struct drm_syncobj; + #define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M enum xe_oa_report_header { @@ -248,6 +250,12 @@ struct xe_oa_stream { /** @xef: xe_file with which the stream was opened */ struct xe_file *xef; + /** @ufence_syncobj: User fence syncobj */ + struct drm_syncobj *ufence_syncobj; + + /** @ufence_timeline_value: User fence timeline value */ + u64 ufence_timeline_value; + /** @last_fence: fence to use in stream destroy when needed */ struct dma_fence *last_fence; @@ -256,5 +264,8 @@ struct xe_oa_stream { /** @syncs: syncs to wait on and to signal */ struct xe_sync_entry *syncs; + + /** @fw_ref: Forcewake reference */ + unsigned int fw_ref; }; #endif diff --git a/drivers/gpu/drm/xe/xe_pagefault.c b/drivers/gpu/drm/xe/xe_pagefault.c new file mode 100644 index 000000000000..fe3e40145012 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_pagefault.c @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <linux/circ_buf.h> + +#include <drm/drm_exec.h> +#include <drm/drm_managed.h> + +#include "xe_bo.h" +#include "xe_device.h" +#include "xe_gt_printk.h" +#include "xe_gt_types.h" +#include "xe_gt_stats.h" +#include "xe_hw_engine.h" +#include "xe_pagefault.h" +#include "xe_pagefault_types.h" +#include "xe_svm.h" +#include "xe_trace_bo.h" +#include "xe_vm.h" + +/** + * DOC: Xe page faults + * + * Xe page faults are handled in two layers. The producer layer interacts with + * hardware or firmware to receive and parse faults into struct xe_pagefault, + * then forwards them to the consumer. The consumer layer services the faults + * (e.g., memory migration, page table updates) and acknowledges the result back + * to the producer, which then forwards the results to the hardware or firmware. + * The consumer uses a page fault queue sized to absorb all potential faults and + * a multi-threaded worker to process them. Multiple producers are supported, + * with a single shared consumer. + * + * xe_pagefault.c implements the consumer layer. 
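The DOC block above describes the producer/consumer split only in prose; the sketch below shows how a hypothetical producer could plug into it. Only struct xe_pagefault, struct xe_pagefault_ops and xe_pagefault_handler() come from this patch; my_hw_send_reply(), my_hw_decode_addr(), my_hw_decode_asid() and my_hw_fault_irq() are made-up stand-ins for a real HW/FW interface.

    /* Hypothetical producer: decode one HW fault message and hand it off. */
    static void my_hw_ack_fault(struct xe_pagefault *pf, int err)
    {
            /* Turn the consumer's verdict into a HW/FW reply via the saved msg. */
            my_hw_send_reply(pf->producer.private, pf->producer.msg, err);
    }

    static const struct xe_pagefault_ops my_hw_pagefault_ops = {
            .ack_fault = my_hw_ack_fault,
    };

    static int my_hw_fault_irq(struct xe_gt *gt, const u32 *raw, void *hw_ctx)
    {
            struct xe_pagefault pf = {
                    .gt = gt,
                    .consumer = {
                            .page_addr = my_hw_decode_addr(raw),
                            .asid = my_hw_decode_asid(raw),
                            .access_type = XE_PAGEFAULT_ACCESS_TYPE_WRITE,
                            .fault_type = XE_PAGEFAULT_TYPE_NOT_PRESENT,
                    },
                    .producer = {
                            .private = hw_ctx,
                            .ops = &my_hw_pagefault_ops,
                    },
            };

            memcpy(pf.producer.msg, raw, sizeof(pf.producer.msg));

            /* Copies into the queue and kicks the worker; IRQ and reclaim safe. */
            return xe_pagefault_handler(gt_to_xe(gt), &pf);
    }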
+ */ + +static int xe_pagefault_entry_size(void) +{ + /* + * Power of two alignment is not a hardware requirement, rather a + * software restriction which makes the math for page fault queue + * management simplier. + */ + return roundup_pow_of_two(sizeof(struct xe_pagefault)); +} + +static int xe_pagefault_begin(struct drm_exec *exec, struct xe_vma *vma, + struct xe_vram_region *vram, bool need_vram_move) +{ + struct xe_bo *bo = xe_vma_bo(vma); + struct xe_vm *vm = xe_vma_vm(vma); + int err; + + err = xe_vm_lock_vma(exec, vma); + if (err) + return err; + + if (!bo) + return 0; + + return need_vram_move ? xe_bo_migrate(bo, vram->placement, NULL, exec) : + xe_bo_validate(bo, vm, true, exec); +} + +static int xe_pagefault_handle_vma(struct xe_gt *gt, struct xe_vma *vma, + bool atomic) +{ + struct xe_vm *vm = xe_vma_vm(vma); + struct xe_tile *tile = gt_to_tile(gt); + struct xe_validation_ctx ctx; + struct drm_exec exec; + struct dma_fence *fence; + int err, needs_vram; + + lockdep_assert_held_write(&vm->lock); + + needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic); + if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma))) + return needs_vram < 0 ? needs_vram : -EACCES; + + xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1); + xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, + xe_vma_size(vma) / SZ_1K); + + trace_xe_vma_pagefault(vma); + + /* Check if VMA is valid, opportunistic check only */ + if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present, + vma->tile_invalidated) && !atomic) + return 0; + +retry_userptr: + if (xe_vma_is_userptr(vma) && + xe_vma_userptr_check_repin(to_userptr_vma(vma))) { + struct xe_userptr_vma *uvma = to_userptr_vma(vma); + + err = xe_vma_userptr_pin_pages(uvma); + if (err) + return err; + } + + /* Lock VM and BOs dma-resv */ + xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {}); + drm_exec_init(&exec, 0, 0); + drm_exec_until_all_locked(&exec) { + err = xe_pagefault_begin(&exec, vma, tile->mem.vram, + needs_vram == 1); + drm_exec_retry_on_contention(&exec); + xe_validation_retry_on_oom(&ctx, &err); + if (err) + goto unlock_dma_resv; + + /* Bind VMA only to the GT that has faulted */ + trace_xe_vma_pf_bind(vma); + xe_vm_set_validation_exec(vm, &exec); + fence = xe_vma_rebind(vm, vma, BIT(tile->id)); + xe_vm_set_validation_exec(vm, NULL); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + xe_validation_retry_on_oom(&ctx, &err); + goto unlock_dma_resv; + } + } + + dma_fence_wait(fence, false); + dma_fence_put(fence); + +unlock_dma_resv: + xe_validation_ctx_fini(&ctx); + if (err == -EAGAIN) + goto retry_userptr; + + return err; +} + +static bool +xe_pagefault_access_is_atomic(enum xe_pagefault_access_type access_type) +{ + return access_type == XE_PAGEFAULT_ACCESS_TYPE_ATOMIC; +} + +static struct xe_vm *xe_pagefault_asid_to_vm(struct xe_device *xe, u32 asid) +{ + struct xe_vm *vm; + + down_read(&xe->usm.lock); + vm = xa_load(&xe->usm.asid_to_vm, asid); + if (vm && xe_vm_in_fault_mode(vm)) + xe_vm_get(vm); + else + vm = ERR_PTR(-EINVAL); + up_read(&xe->usm.lock); + + return vm; +} + +static int xe_pagefault_service(struct xe_pagefault *pf) +{ + struct xe_gt *gt = pf->gt; + struct xe_device *xe = gt_to_xe(gt); + struct xe_vm *vm; + struct xe_vma *vma = NULL; + int err; + bool atomic; + + /* Producer flagged this fault to be nacked */ + if (pf->consumer.fault_level == XE_PAGEFAULT_LEVEL_NACK) + return -EFAULT; + + vm = xe_pagefault_asid_to_vm(xe, pf->consumer.asid); + if (IS_ERR(vm)) + return PTR_ERR(vm); + + /* + * 
TODO: Change to read lock? Using write lock for simplicity. + */ + down_write(&vm->lock); + + if (xe_vm_is_closed(vm)) { + err = -ENOENT; + goto unlock_vm; + } + + vma = xe_vm_find_vma_by_addr(vm, pf->consumer.page_addr); + if (!vma) { + err = -EINVAL; + goto unlock_vm; + } + + atomic = xe_pagefault_access_is_atomic(pf->consumer.access_type); + + if (xe_vma_is_cpu_addr_mirror(vma)) + err = xe_svm_handle_pagefault(vm, vma, gt, + pf->consumer.page_addr, atomic); + else + err = xe_pagefault_handle_vma(gt, vma, atomic); + +unlock_vm: + if (!err) + vm->usm.last_fault_vma = vma; + up_write(&vm->lock); + xe_vm_put(vm); + + return err; +} + +static bool xe_pagefault_queue_pop(struct xe_pagefault_queue *pf_queue, + struct xe_pagefault *pf) +{ + bool found_fault = false; + + spin_lock_irq(&pf_queue->lock); + if (pf_queue->tail != pf_queue->head) { + memcpy(pf, pf_queue->data + pf_queue->tail, sizeof(*pf)); + pf_queue->tail = (pf_queue->tail + xe_pagefault_entry_size()) % + pf_queue->size; + found_fault = true; + } + spin_unlock_irq(&pf_queue->lock); + + return found_fault; +} + +static void xe_pagefault_print(struct xe_pagefault *pf) +{ + xe_gt_dbg(pf->gt, "\n\tASID: %d\n" + "\tFaulted Address: 0x%08x%08x\n" + "\tFaultType: %d\n" + "\tAccessType: %d\n" + "\tFaultLevel: %d\n" + "\tEngineClass: %d %s\n" + "\tEngineInstance: %d\n", + pf->consumer.asid, + upper_32_bits(pf->consumer.page_addr), + lower_32_bits(pf->consumer.page_addr), + pf->consumer.fault_type, + pf->consumer.access_type, + pf->consumer.fault_level, + pf->consumer.engine_class, + xe_hw_engine_class_to_str(pf->consumer.engine_class), + pf->consumer.engine_instance); +} + +static void xe_pagefault_queue_work(struct work_struct *w) +{ + struct xe_pagefault_queue *pf_queue = + container_of(w, typeof(*pf_queue), worker); + struct xe_pagefault pf; + unsigned long threshold; + +#define USM_QUEUE_MAX_RUNTIME_MS 20 + threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS); + + while (xe_pagefault_queue_pop(pf_queue, &pf)) { + int err; + + if (!pf.gt) /* Fault squashed during reset */ + continue; + + err = xe_pagefault_service(&pf); + if (err) { + xe_pagefault_print(&pf); + xe_gt_dbg(pf.gt, "Fault response: Unsuccessful %pe\n", + ERR_PTR(err)); + } + + pf.producer.ops->ack_fault(&pf, err); + + if (time_after(jiffies, threshold)) { + queue_work(gt_to_xe(pf.gt)->usm.pf_wq, w); + break; + } + } +#undef USM_QUEUE_MAX_RUNTIME_MS +} + +static int xe_pagefault_queue_init(struct xe_device *xe, + struct xe_pagefault_queue *pf_queue) +{ + struct xe_gt *gt; + int total_num_eus = 0; + u8 id; + + for_each_gt(gt, xe, id) { + xe_dss_mask_t all_dss; + int num_dss, num_eus; + + bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, + gt->fuse_topo.c_dss_mask, XE_MAX_DSS_FUSE_BITS); + + num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS); + num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss, + XE_MAX_EU_FUSE_BITS) * num_dss; + + total_num_eus += num_eus; + } + + xe_assert(xe, total_num_eus); + + /* + * user can issue separate page faults per EU and per CS + * + * XXX: Multiplier required as compute UMD are getting PF queue errors + * without it. Follow on why this multiplier is required. 
+ */ +#define PF_MULTIPLIER 8 + pf_queue->size = (total_num_eus + XE_NUM_HW_ENGINES) * + xe_pagefault_entry_size() * PF_MULTIPLIER; + pf_queue->size = roundup_pow_of_two(pf_queue->size); +#undef PF_MULTIPLIER + + drm_dbg(&xe->drm, "xe_pagefault_entry_size=%d, total_num_eus=%d, pf_queue->size=%u", + xe_pagefault_entry_size(), total_num_eus, pf_queue->size); + + spin_lock_init(&pf_queue->lock); + INIT_WORK(&pf_queue->worker, xe_pagefault_queue_work); + + pf_queue->data = drmm_kzalloc(&xe->drm, pf_queue->size, GFP_KERNEL); + if (!pf_queue->data) + return -ENOMEM; + + return 0; +} + +static void xe_pagefault_fini(void *arg) +{ + struct xe_device *xe = arg; + + destroy_workqueue(xe->usm.pf_wq); +} + +/** + * xe_pagefault_init() - Page fault init + * @xe: xe device instance + * + * Initialize Xe page fault state. Must be done after reading fuses. + * + * Return: 0 on Success, errno on failure + */ +int xe_pagefault_init(struct xe_device *xe) +{ + int err, i; + + if (!xe->info.has_usm) + return 0; + + xe->usm.pf_wq = alloc_workqueue("xe_page_fault_work_queue", + WQ_UNBOUND | WQ_HIGHPRI, + XE_PAGEFAULT_QUEUE_COUNT); + if (!xe->usm.pf_wq) + return -ENOMEM; + + for (i = 0; i < XE_PAGEFAULT_QUEUE_COUNT; ++i) { + err = xe_pagefault_queue_init(xe, xe->usm.pf_queue + i); + if (err) + goto err_out; + } + + return devm_add_action_or_reset(xe->drm.dev, xe_pagefault_fini, xe); + +err_out: + destroy_workqueue(xe->usm.pf_wq); + return err; +} + +static void xe_pagefault_queue_reset(struct xe_device *xe, struct xe_gt *gt, + struct xe_pagefault_queue *pf_queue) +{ + u32 i; + + /* Driver load failure guard / USM not enabled guard */ + if (!pf_queue->data) + return; + + /* Squash all pending faults on the GT */ + + spin_lock_irq(&pf_queue->lock); + for (i = pf_queue->tail; i != pf_queue->head; + i = (i + xe_pagefault_entry_size()) % pf_queue->size) { + struct xe_pagefault *pf = pf_queue->data + i; + + if (pf->gt == gt) + pf->gt = NULL; + } + spin_unlock_irq(&pf_queue->lock); +} + +/** + * xe_pagefault_reset() - Page fault reset for a GT + * @xe: xe device instance + * @gt: GT being reset + * + * Reset the Xe page fault state for a GT; that is, squash any pending faults on + * the GT. + */ +void xe_pagefault_reset(struct xe_device *xe, struct xe_gt *gt) +{ + int i; + + for (i = 0; i < XE_PAGEFAULT_QUEUE_COUNT; ++i) + xe_pagefault_queue_reset(xe, gt, xe->usm.pf_queue + i); +} + +static bool xe_pagefault_queue_full(struct xe_pagefault_queue *pf_queue) +{ + lockdep_assert_held(&pf_queue->lock); + + return CIRC_SPACE(pf_queue->head, pf_queue->tail, pf_queue->size) <= + xe_pagefault_entry_size(); +} + +/** + * xe_pagefault_handler() - Page fault handler + * @xe: xe device instance + * @pf: Page fault + * + * Sink the page fault to a queue (i.e., a memory buffer) and queue a worker to + * service it. Safe to be called from IRQ or process context. Reclaim safe. 
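To make the queue-sizing comment above concrete, a worked example with made-up fuse numbers (the real counts come from the DSS/EU topology fuses read at init):

    /*
     * struct xe_pagefault is documented as 64 bytes, so
     * xe_pagefault_entry_size() = roundup_pow_of_two(64) = 64.
     *
     * Assuming total_num_eus = 512 and XE_NUM_HW_ENGINES = 64 (illustrative):
     *   (512 + 64) * 64 * PF_MULTIPLIER(8) = 294912 bytes
     *   roundup_pow_of_two(294912)         = 524288 bytes (512 KiB per queue)
     */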
+ * + * Return: 0 on success, errno on failure + */ +int xe_pagefault_handler(struct xe_device *xe, struct xe_pagefault *pf) +{ + struct xe_pagefault_queue *pf_queue = xe->usm.pf_queue + + (pf->consumer.asid % XE_PAGEFAULT_QUEUE_COUNT); + unsigned long flags; + bool full; + + spin_lock_irqsave(&pf_queue->lock, flags); + full = xe_pagefault_queue_full(pf_queue); + if (!full) { + memcpy(pf_queue->data + pf_queue->head, pf, sizeof(*pf)); + pf_queue->head = (pf_queue->head + xe_pagefault_entry_size()) % + pf_queue->size; + queue_work(xe->usm.pf_wq, &pf_queue->worker); + } else { + drm_warn(&xe->drm, + "PageFault Queue (%d) full, shouldn't be possible\n", + pf->consumer.asid % XE_PAGEFAULT_QUEUE_COUNT); + } + spin_unlock_irqrestore(&pf_queue->lock, flags); + + return full ? -ENOSPC : 0; +} diff --git a/drivers/gpu/drm/xe/xe_pagefault.h b/drivers/gpu/drm/xe/xe_pagefault.h new file mode 100644 index 000000000000..bd0cdf9ed37f --- /dev/null +++ b/drivers/gpu/drm/xe/xe_pagefault.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_PAGEFAULT_H_ +#define _XE_PAGEFAULT_H_ + +struct xe_device; +struct xe_gt; +struct xe_pagefault; + +int xe_pagefault_init(struct xe_device *xe); + +void xe_pagefault_reset(struct xe_device *xe, struct xe_gt *gt); + +int xe_pagefault_handler(struct xe_device *xe, struct xe_pagefault *pf); + +#endif diff --git a/drivers/gpu/drm/xe/xe_pagefault_types.h b/drivers/gpu/drm/xe/xe_pagefault_types.h new file mode 100644 index 000000000000..d3b516407d60 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_pagefault_types.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_PAGEFAULT_TYPES_H_ +#define _XE_PAGEFAULT_TYPES_H_ + +#include <linux/workqueue.h> + +struct xe_gt; +struct xe_pagefault; + +/** enum xe_pagefault_access_type - Xe page fault access type */ +enum xe_pagefault_access_type { + /** @XE_PAGEFAULT_ACCESS_TYPE_READ: Read access type */ + XE_PAGEFAULT_ACCESS_TYPE_READ = 0, + /** @XE_PAGEFAULT_ACCESS_TYPE_WRITE: Write access type */ + XE_PAGEFAULT_ACCESS_TYPE_WRITE = 1, + /** @XE_PAGEFAULT_ACCESS_TYPE_ATOMIC: Atomic access type */ + XE_PAGEFAULT_ACCESS_TYPE_ATOMIC = 2, +}; + +/** enum xe_pagefault_type - Xe page fault type */ +enum xe_pagefault_type { + /** @XE_PAGEFAULT_TYPE_NOT_PRESENT: Not present */ + XE_PAGEFAULT_TYPE_NOT_PRESENT = 0, + /** @XE_PAGEFAULT_TYPE_WRITE_ACCESS_VIOLATION: Write access violation */ + XE_PAGEFAULT_TYPE_WRITE_ACCESS_VIOLATION = 1, + /** @XE_PAGEFAULT_TYPE_ATOMIC_ACCESS_VIOLATION: Atomic access violation */ + XE_PAGEFAULT_TYPE_ATOMIC_ACCESS_VIOLATION = 2, +}; + +/** struct xe_pagefault_ops - Xe pagefault ops (producer) */ +struct xe_pagefault_ops { + /** + * @ack_fault: Ack fault + * @pf: Page fault + * @err: Error state of fault + * + * Page fault producer receives acknowledgment from the consumer and + * sends the result to the HW/FW interface. + */ + void (*ack_fault)(struct xe_pagefault *pf, int err); +}; + +/** + * struct xe_pagefault - Xe page fault + * + * Generic page fault structure for communication between producer and consumer. + * Carefully sized to be 64 bytes. Upon a device page fault, the producer + * populates this structure, and the consumer copies it into the page-fault + * queue for deferred handling. + */ +struct xe_pagefault { + /** + * @gt: GT of fault + */ + struct xe_gt *gt; + /** + * @consumer: State for the software handling the fault. 
Populated by + * the producer and may be modified by the consumer to communicate + * information back to the producer upon fault acknowledgment. + */ + struct { + /** @consumer.page_addr: address of page fault */ + u64 page_addr; + /** @consumer.asid: address space ID */ + u32 asid; + /** + * @consumer.access_type: access type, u8 rather than enum to + * keep size compact + */ + u8 access_type; + /** + * @consumer.fault_type: fault type, u8 rather than enum to + * keep size compact + */ + u8 fault_type; +#define XE_PAGEFAULT_LEVEL_NACK 0xff /* Producer indicates nack fault */ + /** @consumer.fault_level: fault level */ + u8 fault_level; + /** @consumer.engine_class: engine class */ + u8 engine_class; + /** @consumer.engine_instance: engine instance */ + u8 engine_instance; + /** consumer.reserved: reserved bits for future expansion */ + u8 reserved[7]; + } consumer; + /** + * @producer: State for the producer (i.e., HW/FW interface). Populated + * by the producer and should not be modified—or even inspected—by the + * consumer, except for calling operations. + */ + struct { + /** @producer.private: private pointer */ + void *private; + /** @producer.ops: operations */ + const struct xe_pagefault_ops *ops; +#define XE_PAGEFAULT_PRODUCER_MSG_LEN_DW 4 + /** + * @producer.msg: page fault message, used by producer in fault + * acknowledgment to formulate response to HW/FW interface. + * Included in the page-fault message because the producer + * typically receives the fault in a context where memory cannot + * be allocated (e.g., atomic context or the reclaim path). + */ + u32 msg[XE_PAGEFAULT_PRODUCER_MSG_LEN_DW]; + } producer; +}; + +/** + * struct xe_pagefault_queue: Xe pagefault queue (consumer) + * + * Used to capture all device page faults for deferred processing. Size this + * queue to absorb the device’s worst-case number of outstanding faults. + */ +struct xe_pagefault_queue { + /** + * @data: Data in queue containing struct xe_pagefault, protected by + * @lock + */ + void *data; + /** @size: Size of queue in bytes */ + u32 size; + /** @head: Head pointer in bytes, moved by producer, protected by @lock */ + u32 head; + /** @tail: Tail pointer in bytes, moved by consumer, protected by @lock */ + u32 tail; + /** @lock: protects page fault queue */ + spinlock_t lock; + /** @worker: to process page faults */ + struct work_struct worker; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c index 2e7cb99ae87a..68171cceea18 100644 --- a/drivers/gpu/drm/xe/xe_pat.c +++ b/drivers/gpu/drm/xe/xe_pat.c @@ -57,7 +57,7 @@ struct xe_pat_ops { int n_entries); void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[], int n_entries); - void (*dump)(struct xe_gt *gt, struct drm_printer *p); + int (*dump)(struct xe_gt *gt, struct drm_printer *p); }; static const struct xe_pat_table_entry xelp_pat_table[] = { @@ -115,7 +115,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = { REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \ REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \ .coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? 
\ - XE_COH_AT_LEAST_1WAY : XE_COH_NONE \ + XE_COH_AT_LEAST_1WAY : XE_COH_NONE, \ + .valid = 1 \ } static const struct xe_pat_table_entry xe2_pat_table[] = { @@ -154,6 +155,41 @@ static const struct xe_pat_table_entry xe2_pat_table[] = { static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 ); static const struct xe_pat_table_entry xe2_pat_pta = XE2_PAT( 0, 0, 0, 0, 3, 0 ); +/* + * Xe3p_XPC PAT table uses the same layout as Xe2/Xe3, except that there's no + * option for compression. Also note that the "L3" and "L4" register fields + * actually control L2 and L3 cache respectively on this platform. + */ +#define XE3P_XPC_PAT(no_promote, l3clos, l3_policy, l4_policy, __coh_mode) \ + XE2_PAT(no_promote, 0, l3clos, l3_policy, l4_policy, __coh_mode) + +static const struct xe_pat_table_entry xe3p_xpc_pat_ats = XE3P_XPC_PAT( 0, 0, 0, 0, 3 ); +static const struct xe_pat_table_entry xe3p_xpc_pat_pta = XE3P_XPC_PAT( 0, 0, 0, 0, 0 ); + +static const struct xe_pat_table_entry xe3p_xpc_pat_table[] = { + [ 0] = XE3P_XPC_PAT( 0, 0, 0, 0, 0 ), + [ 1] = XE3P_XPC_PAT( 0, 0, 0, 0, 2 ), + [ 2] = XE3P_XPC_PAT( 0, 0, 0, 0, 3 ), + [ 3] = XE3P_XPC_PAT( 0, 0, 3, 3, 0 ), + [ 4] = XE3P_XPC_PAT( 0, 0, 3, 3, 2 ), + [ 5] = XE3P_XPC_PAT( 0, 0, 3, 0, 0 ), + [ 6] = XE3P_XPC_PAT( 0, 0, 3, 0, 2 ), + [ 7] = XE3P_XPC_PAT( 0, 0, 3, 0, 3 ), + [ 8] = XE3P_XPC_PAT( 0, 0, 0, 3, 0 ), + [ 9] = XE3P_XPC_PAT( 0, 0, 0, 3, 2 ), + [10] = XE3P_XPC_PAT( 0, 0, 0, 3, 3 ), + /* 11..22 are reserved; leave set to all 0's */ + [23] = XE3P_XPC_PAT( 0, 1, 0, 0, 0 ), + [24] = XE3P_XPC_PAT( 0, 1, 0, 0, 2 ), + [25] = XE3P_XPC_PAT( 0, 1, 0, 0, 3 ), + [26] = XE3P_XPC_PAT( 0, 2, 0, 0, 0 ), + [27] = XE3P_XPC_PAT( 0, 2, 0, 0, 2 ), + [28] = XE3P_XPC_PAT( 0, 2, 0, 0, 3 ), + [29] = XE3P_XPC_PAT( 0, 3, 0, 0, 0 ), + [30] = XE3P_XPC_PAT( 0, 3, 0, 0, 2 ), + [31] = XE3P_XPC_PAT( 0, 3, 0, 0, 3 ), +}; + u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index) { WARN_ON(pat_index >= xe->pat.n_entries); @@ -194,7 +230,7 @@ static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry ta xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value); } -static void xelp_dump(struct xe_gt *gt, struct drm_printer *p) +static int xelp_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); unsigned int fw_ref; @@ -202,7 +238,7 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p) fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (!fw_ref) - return; + return -ETIMEDOUT; drm_printf(p, "PAT table:\n"); @@ -215,6 +251,7 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p) } xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } static const struct xe_pat_ops xelp_pat_ops = { @@ -222,7 +259,7 @@ static const struct xe_pat_ops xelp_pat_ops = { .dump = xelp_dump, }; -static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) +static int xehp_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); unsigned int fw_ref; @@ -230,7 +267,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (!fw_ref) - return; + return -ETIMEDOUT; drm_printf(p, "PAT table:\n"); @@ -245,6 +282,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) } xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } static const struct xe_pat_ops xehp_pat_ops = { @@ -252,7 +290,7 @@ static const struct xe_pat_ops xehp_pat_ops = { .dump = xehp_dump, }; -static void xehpc_dump(struct 
xe_gt *gt, struct drm_printer *p) +static int xehpc_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); unsigned int fw_ref; @@ -260,7 +298,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p) fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (!fw_ref) - return; + return -ETIMEDOUT; drm_printf(p, "PAT table:\n"); @@ -273,6 +311,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p) } xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } static const struct xe_pat_ops xehpc_pat_ops = { @@ -280,7 +319,7 @@ static const struct xe_pat_ops xehpc_pat_ops = { .dump = xehpc_dump, }; -static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) +static int xelpg_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); unsigned int fw_ref; @@ -288,7 +327,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (!fw_ref) - return; + return -ETIMEDOUT; drm_printf(p, "PAT table:\n"); @@ -306,6 +345,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) } xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } /* @@ -318,7 +358,7 @@ static const struct xe_pat_ops xelpg_pat_ops = { .dump = xelpg_dump, }; -static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) +static int xe2_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); unsigned int fw_ref; @@ -327,9 +367,9 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (!fw_ref) - return; + return -ETIMEDOUT; - drm_printf(p, "PAT table:\n"); + drm_printf(p, "PAT table: (* = reserved entry)\n"); for (i = 0; i < xe->pat.n_entries; i++) { if (xe_gt_is_media_type(gt)) @@ -337,14 +377,14 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) else pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i))); - drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u, %u ] (%#8x)\n", i, + drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u, %u ] (%#8x)%s\n", i, !!(pat & XE2_NO_PROMOTE), !!(pat & XE2_COMP_EN), REG_FIELD_GET(XE2_L3_CLOS, pat), REG_FIELD_GET(XE2_L3_POLICY, pat), REG_FIELD_GET(XE2_L4_POLICY, pat), REG_FIELD_GET(XE2_COH_MODE, pat), - pat); + pat, xe->pat.table[i].valid ? "" : " *"); } /* @@ -367,6 +407,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) pat); xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } static const struct xe_pat_ops xe2_pat_ops = { @@ -375,9 +416,68 @@ static const struct xe_pat_ops xe2_pat_ops = { .dump = xe2_dump, }; +static int xe3p_xpc_dump(struct xe_gt *gt, struct drm_printer *p) +{ + struct xe_device *xe = gt_to_xe(gt); + unsigned int fw_ref; + u32 pat; + int i; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; + + drm_printf(p, "PAT table: (* = reserved entry)\n"); + + for (i = 0; i < xe->pat.n_entries; i++) { + pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i))); + + drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u ] (%#8x)%s\n", i, + !!(pat & XE2_NO_PROMOTE), + REG_FIELD_GET(XE2_L3_CLOS, pat), + REG_FIELD_GET(XE2_L3_POLICY, pat), + REG_FIELD_GET(XE2_L4_POLICY, pat), + REG_FIELD_GET(XE2_COH_MODE, pat), + pat, xe->pat.table[i].valid ? "" : " *"); + } + + /* + * Also print PTA_MODE, which describes how the hardware accesses + * PPGTT entries. 
+ */ + pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA)); + + drm_printf(p, "Page Table Access:\n"); + drm_printf(p, "PTA_MODE= [ %u, %u, %u, %u, %u ] (%#8x)\n", + !!(pat & XE2_NO_PROMOTE), + REG_FIELD_GET(XE2_L3_CLOS, pat), + REG_FIELD_GET(XE2_L3_POLICY, pat), + REG_FIELD_GET(XE2_L4_POLICY, pat), + REG_FIELD_GET(XE2_COH_MODE, pat), + pat); + + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; +} + +static const struct xe_pat_ops xe3p_xpc_pat_ops = { + .program_graphics = program_pat_mcr, + .program_media = program_pat, + .dump = xe3p_xpc_dump, +}; + void xe_pat_init_early(struct xe_device *xe) { - if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) { + if (GRAPHICS_VERx100(xe) == 3511) { + xe->pat.ops = &xe3p_xpc_pat_ops; + xe->pat.table = xe3p_xpc_pat_table; + xe->pat.pat_ats = &xe3p_xpc_pat_ats; + xe->pat.pat_pta = &xe3p_xpc_pat_pta; + xe->pat.n_entries = ARRAY_SIZE(xe3p_xpc_pat_table); + xe->pat.idx[XE_CACHE_NONE] = 3; + xe->pat.idx[XE_CACHE_WT] = 3; /* N/A (no display); use UC */ + xe->pat.idx[XE_CACHE_WB] = 2; + } else if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) { xe->pat.ops = &xe2_pat_ops; xe->pat.table = xe2_pat_table; xe->pat.pat_ats = &xe2_pat_ats; @@ -462,12 +562,19 @@ void xe_pat_init(struct xe_gt *gt) xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries); } -void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p) +/** + * xe_pat_dump() - Dump GT PAT table into a drm printer. + * @gt: the &xe_gt + * @p: the &drm_printer + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_pat_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); if (!xe->pat.ops) - return; + return -EOPNOTSUPP; - xe->pat.ops->dump(gt, p); + return xe->pat.ops->dump(gt, p); } diff --git a/drivers/gpu/drm/xe/xe_pat.h b/drivers/gpu/drm/xe/xe_pat.h index fa0dfbe525cd..05dae03a5f54 100644 --- a/drivers/gpu/drm/xe/xe_pat.h +++ b/drivers/gpu/drm/xe/xe_pat.h @@ -29,6 +29,11 @@ struct xe_pat_table_entry { #define XE_COH_NONE 1 #define XE_COH_AT_LEAST_1WAY 2 u16 coh_mode; + + /** + * @valid: Set to 1 if the entry is valid, 0 if it's reserved. 
+ */ + u16 valid; }; /** @@ -43,12 +48,7 @@ void xe_pat_init_early(struct xe_device *xe); */ void xe_pat_init(struct xe_gt *gt); -/** - * xe_pat_dump - Dump PAT table - * @gt: GT structure - * @p: Printer to dump info to - */ -void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p); +int xe_pat_dump(struct xe_gt *gt, struct drm_printer *p); /** * xe_pat_index_get_coh_mode - Extract the coherency mode for the given diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 9a6df79fc5b6..4636e4ef9baa 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -30,6 +30,7 @@ #include "xe_pci_sriov.h" #include "xe_pci_types.h" #include "xe_pm.h" +#include "xe_printk.h" #include "xe_sriov.h" #include "xe_step.h" #include "xe_survivability_mode.h" @@ -51,15 +52,10 @@ __diag_ignore_all("-Woverride-init", "Allow field overrides in table"); static const struct xe_graphics_desc graphics_xelp = { .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0), - - .va_bits = 48, - .vm_max_level = 3, }; #define XE_HP_FEATURES \ - .has_range_tlb_inval = true, \ - .va_bits = 48, \ - .vm_max_level = 3 + .has_range_tlb_inval = true static const struct xe_graphics_desc graphics_xehpg = { .hw_engine_mask = @@ -68,9 +64,6 @@ static const struct xe_graphics_desc graphics_xehpg = { BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3), XE_HP_FEATURES, - .vram_flags = XE_VRAM_FLAGS_NEED64K, - - .has_flat_ccs = 1, }; static const struct xe_graphics_desc graphics_xehpc = { @@ -84,9 +77,6 @@ static const struct xe_graphics_desc graphics_xehpc = { BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3), XE_HP_FEATURES, - .va_bits = 57, - .vm_max_level = 4, - .vram_flags = XE_VRAM_FLAGS_NEED64K, .has_asid = 1, .has_atomic_enable_pte_bit = 1, @@ -104,12 +94,9 @@ static const struct xe_graphics_desc graphics_xelpg = { #define XE2_GFX_FEATURES \ .has_asid = 1, \ .has_atomic_enable_pte_bit = 1, \ - .has_flat_ccs = 1, \ .has_range_tlb_inval = 1, \ .has_usm = 1, \ .has_64bit_timestamp = 1, \ - .va_bits = 48, \ - .vm_max_level = 4, \ .hw_engine_mask = \ BIT(XE_HW_ENGINE_RCS0) | \ BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \ @@ -119,6 +106,13 @@ static const struct xe_graphics_desc graphics_xe2 = { XE2_GFX_FEATURES, }; +static const struct xe_graphics_desc graphics_xe3p_xpc = { + XE2_GFX_FEATURES, + .hw_engine_mask = + GENMASK(XE_HW_ENGINE_BCS8, XE_HW_ENGINE_BCS1) | + GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0), +}; + static const struct xe_media_desc media_xem = { .hw_engine_mask = GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) | @@ -149,6 +143,9 @@ static const struct xe_ip graphics_ips[] = { { 3000, "Xe3_LPG", &graphics_xe2 }, { 3001, "Xe3_LPG", &graphics_xe2 }, { 3003, "Xe3_LPG", &graphics_xe2 }, + { 3004, "Xe3_LPG", &graphics_xe2 }, + { 3005, "Xe3_LPG", &graphics_xe2 }, + { 3511, "Xe3p_XPC", &graphics_xe3p_xpc }, }; /* Pre-GMDID Media IPs */ @@ -162,6 +159,8 @@ static const struct xe_ip media_ips[] = { { 2000, "Xe2_LPM", &media_xelpmp }, { 3000, "Xe3_LPM", &media_xelpmp }, { 3002, "Xe3_LPM", &media_xelpmp }, + { 3500, "Xe3p_LPM", &media_xelpmp }, + { 3503, "Xe3p_HPM", &media_xelpmp }, }; static const struct xe_device_desc tgl_desc = { @@ -174,6 +173,8 @@ static const struct xe_device_desc tgl_desc = { .has_sriov = true, .max_gt_per_tile = 1, .require_force_probe = true, + .va_bits = 48, + .vm_max_level = 3, }; static const struct xe_device_desc rkl_desc = { @@ -185,6 +186,8 @@ static const struct xe_device_desc rkl_desc = { .has_llc = true, .max_gt_per_tile = 1, .require_force_probe = 
true, + .va_bits = 48, + .vm_max_level = 3, }; static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 }; @@ -203,6 +206,8 @@ static const struct xe_device_desc adl_s_desc = { { XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids }, {}, }, + .va_bits = 48, + .vm_max_level = 3, }; static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 }; @@ -221,6 +226,8 @@ static const struct xe_device_desc adl_p_desc = { { XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids }, {}, }, + .va_bits = 48, + .vm_max_level = 3, }; static const struct xe_device_desc adl_n_desc = { @@ -233,6 +240,8 @@ static const struct xe_device_desc adl_n_desc = { .has_sriov = true, .max_gt_per_tile = 1, .require_force_probe = true, + .va_bits = 48, + .vm_max_level = 3, }; #define DGFX_FEATURES \ @@ -249,6 +258,8 @@ static const struct xe_device_desc dg1_desc = { .has_heci_gscfi = 1, .max_gt_per_tile = 1, .require_force_probe = true, + .va_bits = 48, + .vm_max_level = 3, }; static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 }; @@ -258,6 +269,7 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 }; #define DG2_FEATURES \ DGFX_FEATURES, \ PLATFORM(DG2), \ + .has_flat_ccs = 1, \ .has_gsc_nvm = 1, \ .has_heci_gscfi = 1, \ .subplatforms = (const struct xe_subplatform_desc[]) { \ @@ -265,7 +277,10 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 }; { XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \ { XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \ { } \ - } + }, \ + .va_bits = 48, \ + .vm_max_level = 3, \ + .vram_flags = XE_VRAM_FLAGS_NEED64K static const struct xe_device_desc ats_m_desc = { .pre_gmdid_graphics_ip = &graphics_ip_xehpg, @@ -303,6 +318,9 @@ static const __maybe_unused struct xe_device_desc pvc_desc = { .max_gt_per_tile = 1, .max_remote_tiles = 1, .require_force_probe = true, + .va_bits = 57, + .vm_max_level = 4, + .vram_flags = XE_VRAM_FLAGS_NEED64K, .has_mbx_power_limits = false, }; @@ -314,39 +332,86 @@ static const struct xe_device_desc mtl_desc = { .has_display = true, .has_pxp = true, .max_gt_per_tile = 2, + .va_bits = 48, + .vm_max_level = 3, }; static const struct xe_device_desc lnl_desc = { PLATFORM(LUNARLAKE), .dma_mask_size = 46, .has_display = true, + .has_flat_ccs = 1, .has_pxp = true, + .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, + .va_bits = 48, + .vm_max_level = 4, }; +static const u16 bmg_g21_ids[] = { INTEL_BMG_G21_IDS(NOP), 0 }; + static const struct xe_device_desc bmg_desc = { DGFX_FEATURES, PLATFORM(BATTLEMAGE), .dma_mask_size = 46, .has_display = true, .has_fan_control = true, + .has_flat_ccs = 1, .has_mbx_power_limits = true, .has_gsc_nvm = 1, .has_heci_cscfi = 1, .has_late_bind = true, .has_sriov = true, + .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, + .subplatforms = (const struct xe_subplatform_desc[]) { + { XE_SUBPLATFORM_BATTLEMAGE_G21, "G21", bmg_g21_ids }, + { } + }, + .va_bits = 48, + .vm_max_level = 4, }; static const struct xe_device_desc ptl_desc = { PLATFORM(PANTHERLAKE), .dma_mask_size = 46, .has_display = true, + .has_flat_ccs = 1, .has_sriov = true, + .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, + .needs_shared_vf_gt_wq = true, + .va_bits = 48, + .vm_max_level = 4, +}; + +static const struct xe_device_desc nvls_desc = { + PLATFORM(NOVALAKE_S), + .dma_mask_size = 46, + .has_display = true, + .has_flat_ccs = 1, + .has_mem_copy_instr = true, + .max_gt_per_tile = 2, + .require_force_probe = true, + .va_bits = 48, + .vm_max_level 
= 4, +}; + +static const struct xe_device_desc cri_desc = { + DGFX_FEATURES, + PLATFORM(CRESCENTISLAND), + .dma_mask_size = 52, + .has_display = false, + .has_flat_ccs = false, + .has_mbx_power_limits = true, + .has_sriov = true, + .max_gt_per_tile = 2, + .require_force_probe = true, + .va_bits = 57, + .vm_max_level = 4, }; #undef PLATFORM @@ -375,6 +440,9 @@ static const struct pci_device_id pciidlist[] = { INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc), INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc), INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc), + INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc), + INTEL_NVLS_IDS(INTEL_VGA_DEVICE, &nvls_desc), + INTEL_CRI_IDS(INTEL_PCI_DEVICE, &cri_desc), { } }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -447,7 +515,7 @@ enum xe_gmdid_type { GMDID_MEDIA }; -static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) +static int read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) { struct xe_mmio *mmio = xe_root_tile_mmio(xe); struct xe_reg gmdid_reg = GMD_ID; @@ -456,22 +524,24 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid); if (IS_SRIOV_VF(xe)) { - struct xe_gt *gt = xe_root_mmio_gt(xe); - /* * To get the value of the GMDID register, VFs must obtain it * from the GuC using MMIO communication. * - * Note that at this point the xe_gt is not fully uninitialized - * and only basic access to MMIO registers is possible. To use - * our existing GuC communication functions we must perform at - * least basic xe_gt and xe_guc initialization. - * - * Since to obtain the value of GMDID_MEDIA we need to use the - * media GuC, temporarily tweak the gt type. + * Note that at this point the GTs are not initialized and only + * tile-level access to MMIO registers is possible. To use our + * existing GuC communication functions we must create a dummy + * GT structure and perform at least basic xe_gt and xe_guc + * initialization. */ - xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED); + struct xe_gt *gt __free(kfree) = NULL; + int err; + + gt = kzalloc(sizeof(*gt), GFP_KERNEL); + if (!gt) + return -ENOMEM; + gt->tile = &xe->tiles[0]; if (type == GMDID_MEDIA) { gt->info.id = 1; gt->info.type = XE_GT_TYPE_MEDIA; @@ -483,15 +553,11 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, xe_gt_mmio_init(gt); xe_guc_comm_init_early(>->uc.guc); - /* Don't bother with GMDID if failed to negotiate the GuC ABI */ - val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt); + err = xe_gt_sriov_vf_bootstrap(gt); + if (err) + return err; - /* - * Only undo xe_gt.info here, the remaining changes made above - * will be overwritten as part of the regular initialization. - */ - gt->info.id = 0; - gt->info.type = XE_GT_TYPE_UNINITIALIZED; + val = xe_gt_sriov_vf_gmdid(gt); } else { /* * GMD_ID is a GT register, but at this point in the driver @@ -509,6 +575,8 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val); *revid = REG_FIELD_GET(GMD_ID_REVID, val); + + return 0; } static const struct xe_ip *find_graphics_ip(unsigned int verx100) @@ -535,18 +603,21 @@ static const struct xe_ip *find_media_ip(unsigned int verx100) * Read IP version from hardware and select graphics/media IP descriptors * based on the result. 
*/ -static void handle_gmdid(struct xe_device *xe, - const struct xe_ip **graphics_ip, - const struct xe_ip **media_ip, - u32 *graphics_revid, - u32 *media_revid) +static int handle_gmdid(struct xe_device *xe, + const struct xe_ip **graphics_ip, + const struct xe_ip **media_ip, + u32 *graphics_revid, + u32 *media_revid) { u32 ver; + int ret; *graphics_ip = NULL; *media_ip = NULL; - read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid); + ret = read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid); + if (ret) + return ret; *graphics_ip = find_graphics_ip(ver); if (!*graphics_ip) { @@ -554,16 +625,21 @@ static void handle_gmdid(struct xe_device *xe, ver / 100, ver % 100); } - read_gmdid(xe, GMDID_MEDIA, &ver, media_revid); + ret = read_gmdid(xe, GMDID_MEDIA, &ver, media_revid); + if (ret) + return ret; + /* Media may legitimately be fused off / not present */ if (ver == 0) - return; + return 0; *media_ip = find_media_ip(ver); if (!*media_ip) { drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n", ver / 100, ver % 100); } + + return 0; } /* @@ -582,8 +658,14 @@ static int xe_info_init_early(struct xe_device *xe, subplatform_desc->subplatform : XE_SUBPLATFORM_NONE; xe->info.dma_mask_size = desc->dma_mask_size; + xe->info.va_bits = desc->va_bits; + xe->info.vm_max_level = desc->vm_max_level; + xe->info.vram_flags = desc->vram_flags; + xe->info.is_dgfx = desc->is_dgfx; xe->info.has_fan_control = desc->has_fan_control; + /* runtime fusing may force flat_ccs to disabled later */ + xe->info.has_flat_ccs = desc->has_flat_ccs; xe->info.has_mbx_power_limits = desc->has_mbx_power_limits; xe->info.has_gsc_nvm = desc->has_gsc_nvm; xe->info.has_heci_gscfi = desc->has_heci_gscfi; @@ -591,11 +673,14 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.has_late_bind = desc->has_late_bind; xe->info.has_llc = desc->has_llc; xe->info.has_pxp = desc->has_pxp; - xe->info.has_sriov = desc->has_sriov; + xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) && + desc->has_sriov; + xe->info.has_mem_copy_instr = desc->has_mem_copy_instr; xe->info.skip_guc_pc = desc->skip_guc_pc; xe->info.skip_mtcfg = desc->skip_mtcfg; xe->info.skip_pcode = desc->skip_pcode; xe->info.needs_scratch = desc->needs_scratch; + xe->info.needs_shared_vf_gt_wq = desc->needs_shared_vf_gt_wq; xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) && xe_modparam.probe_display && @@ -651,6 +736,63 @@ static void xe_info_probe_tile_count(struct xe_device *xe) } } +static struct xe_gt *alloc_primary_gt(struct xe_tile *tile, + const struct xe_graphics_desc *graphics_desc, + const struct xe_media_desc *media_desc) +{ + struct xe_device *xe = tile_to_xe(tile); + struct xe_gt *gt; + + if (!xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev))) { + xe_info(xe, "Primary GT disabled via configfs\n"); + return NULL; + } + + gt = xe_gt_alloc(tile); + if (IS_ERR(gt)) + return gt; + + gt->info.type = XE_GT_TYPE_MAIN; + gt->info.id = tile->id * xe->info.max_gt_per_tile; + gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state; + gt->info.engine_mask = graphics_desc->hw_engine_mask; + + /* + * Before media version 13, the media IP was part of the primary GT + * so we need to add the media engines to the primary GT's engine list. 
+ */ + if (MEDIA_VER(xe) < 13 && media_desc) + gt->info.engine_mask |= media_desc->hw_engine_mask; + + return gt; +} + +static struct xe_gt *alloc_media_gt(struct xe_tile *tile, + const struct xe_media_desc *media_desc) +{ + struct xe_device *xe = tile_to_xe(tile); + struct xe_gt *gt; + + if (!xe_configfs_media_gt_allowed(to_pci_dev(xe->drm.dev))) { + xe_info(xe, "Media GT disabled via configfs\n"); + return NULL; + } + + if (MEDIA_VER(xe) < 13 || !media_desc) + return NULL; + + gt = xe_gt_alloc(tile); + if (IS_ERR(gt)) + return gt; + + gt->info.type = XE_GT_TYPE_MEDIA; + gt->info.id = tile->id * xe->info.max_gt_per_tile + 1; + gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state; + gt->info.engine_mask = media_desc->hw_engine_mask; + + return gt; +} + /* * Initialize device info content that does require knowledge about * graphics / media IP version. @@ -667,6 +809,7 @@ static int xe_info_init(struct xe_device *xe, const struct xe_media_desc *media_desc; struct xe_tile *tile; struct xe_gt *gt; + int ret; u8 id; /* @@ -682,8 +825,11 @@ static int xe_info_init(struct xe_device *xe, xe->info.step = xe_step_pre_gmdid_get(xe); } else { xe_assert(xe, !desc->pre_gmdid_media_ip); - handle_gmdid(xe, &graphics_ip, &media_ip, - &graphics_gmdid_revid, &media_gmdid_revid); + ret = handle_gmdid(xe, &graphics_ip, &media_ip, + &graphics_gmdid_revid, &media_gmdid_revid); + if (ret) + return ret; + xe->info.step = xe_step_gmdid_get(xe, graphics_gmdid_revid, media_gmdid_revid); @@ -710,17 +856,11 @@ static int xe_info_init(struct xe_device *xe, media_desc = NULL; } - xe->info.vram_flags = graphics_desc->vram_flags; - xe->info.va_bits = graphics_desc->va_bits; - xe->info.vm_max_level = graphics_desc->vm_max_level; xe->info.has_asid = graphics_desc->has_asid; xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit; if (xe->info.platform != XE_PVC) xe->info.has_device_atomics_on_smem = 1; - /* Runtime detection may change this later */ - xe->info.has_flat_ccs = graphics_desc->has_flat_ccs; - xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval; xe->info.has_usm = graphics_desc->has_usm; xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp; @@ -735,44 +875,33 @@ static int xe_info_init(struct xe_device *xe, return err; } - /* - * All platforms have at least one primary GT. Any platform with media - * version 13 or higher has an additional dedicated media GT. And - * depending on the graphics IP there may be additional "remote tiles." - * All of these together determine the overall GT count. - */ + /* Allocate any GT and VRAM structures necessary for the platform. */ for_each_tile(tile, xe, id) { int err; - gt = tile->primary_gt; - gt->info.type = XE_GT_TYPE_MAIN; - gt->info.id = tile->id * xe->info.max_gt_per_tile; - gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state; - gt->info.engine_mask = graphics_desc->hw_engine_mask; - err = xe_tile_alloc_vram(tile); if (err) return err; - if (MEDIA_VER(xe) < 13 && media_desc) - gt->info.engine_mask |= media_desc->hw_engine_mask; - - if (MEDIA_VER(xe) < 13 || !media_desc) - continue; + tile->primary_gt = alloc_primary_gt(tile, graphics_desc, media_desc); + if (IS_ERR(tile->primary_gt)) + return PTR_ERR(tile->primary_gt); /* - * Allocate and setup media GT for platforms with standalone - * media. + * It's not currently possible to probe a device with the + * primary GT disabled. 
With some work, this may be possible in + * the future for igpu platforms (although probably not for + * dgpu's since access to the primary GT's BCS engines is + * required for VRAM management). + */ - tile->media_gt = xe_gt_alloc(tile); + if (!tile->primary_gt) { + drm_err(&xe->drm, "Cannot probe device without a primary GT\n"); + return -ENODEV; + } + + tile->media_gt = alloc_media_gt(tile, media_desc); if (IS_ERR(tile->media_gt)) return PTR_ERR(tile->media_gt); - - gt = tile->media_gt; - gt->info.type = XE_GT_TYPE_MEDIA; - gt->info.id = tile->id * xe->info.max_gt_per_tile + 1; - gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state; - gt->info.engine_mask = media_desc->hw_engine_mask; } /* diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c index af05db07162e..9ff69c4843b0 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.c +++ b/drivers/gpu/drm/xe/xe_pci_sriov.c @@ -17,68 +17,18 @@ #include "xe_pm.h" #include "xe_sriov.h" #include "xe_sriov_pf.h" +#include "xe_sriov_pf_control.h" #include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_provision.h" +#include "xe_sriov_pf_sysfs.h" #include "xe_sriov_printk.h" -static int pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs) -{ - unsigned int n; - - for (n = 1; n <= num_vfs; n++) - if (!xe_gt_sriov_pf_config_is_empty(gt, n)) - return false; - - return true; -} - -static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs) -{ - struct xe_gt *gt; - unsigned int id; - int result = 0, err; - - for_each_gt(gt, xe, id) { - if (!pf_needs_provisioning(gt, num_vfs)) - continue; - err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs); - result = result ?: err; - } - - return result; -} - -static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs) -{ - struct xe_gt *gt; - unsigned int id; - unsigned int n; - - for_each_gt(gt, xe, id) - for (n = 1; n <= num_vfs; n++) - xe_gt_sriov_pf_config_release(gt, n, true); -} - static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs) { - struct xe_gt *gt; - unsigned int id; unsigned int n; - for_each_gt(gt, xe, id) - for (n = 1; n <= num_vfs; n++) - xe_gt_sriov_pf_control_trigger_flr(gt, n); -} - -static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id) -{ - struct pci_dev *pdev = to_pci_dev(xe->drm.dev); - - xe_assert(xe, IS_SRIOV_PF(xe)); - - /* caller must use pci_dev_put() */ - return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), - pdev->bus->number, - pci_iov_virtfn_devfn(pdev, vf_id)); + for (n = 1; n <= num_vfs; n++) + xe_sriov_pf_control_reset_vf(xe, n); } static void pf_link_vfs(struct xe_device *xe, int num_vfs) @@ -99,7 +49,7 @@ static void pf_link_vfs(struct xe_device *xe, int num_vfs) * enforce correct resume order. 
*/ for (n = 1; n <= num_vfs; n++) { - pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1); + pdev_vf = xe_pci_sriov_get_vf_pdev(pdev_pf, n); /* unlikely, something weird is happening, abort */ if (!pdev_vf) { @@ -144,6 +94,20 @@ static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs) return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes)); } +static int pf_prepare_vfs_enabling(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + /* make sure we are not locked-down by other components */ + return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, false, NULL); +} + +static void pf_finish_vfs_enabling(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + /* allow other components to lockdown VFs enabling */ + xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, false, NULL); +} + static int pf_enable_vfs(struct xe_device *xe, int num_vfs) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -159,6 +123,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) if (err) goto out; + err = pf_prepare_vfs_enabling(xe); + if (err) + goto out; + /* * We must hold additional reference to the runtime PM to keep PF in D0 * during VFs lifetime, as our VFs do not implement the PM capability. @@ -170,7 +138,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) */ xe_pm_runtime_get_noresume(xe); - err = pf_provision_vfs(xe, num_vfs); + err = xe_sriov_pf_provision_vfs(xe, num_vfs); if (err < 0) goto failed; @@ -189,13 +157,16 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) xe_sriov_info(xe, "Enabled %u of %u VF%s\n", num_vfs, total_vfs, str_plural(total_vfs)); + xe_sriov_pf_sysfs_link_vfs(xe, num_vfs); + pf_engine_activity_stats(xe, num_vfs, true); return num_vfs; failed: - pf_unprovision_vfs(xe, num_vfs); + xe_sriov_pf_unprovision_vfs(xe, num_vfs); xe_pm_runtime_put(xe); + pf_finish_vfs_enabling(xe); out: xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n", num_vfs, str_plural(num_vfs), ERR_PTR(err)); @@ -216,15 +187,19 @@ static int pf_disable_vfs(struct xe_device *xe) pf_engine_activity_stats(xe, num_vfs, false); + xe_sriov_pf_sysfs_unlink_vfs(xe, num_vfs); + pci_disable_sriov(pdev); pf_reset_vfs(xe, num_vfs); - pf_unprovision_vfs(xe, num_vfs); + xe_sriov_pf_unprovision_vfs(xe, num_vfs); /* not needed anymore - see pf_enable_vfs() */ xe_pm_runtime_put(xe); + pf_finish_vfs_enabling(xe); + xe_sriov_info(xe, "Disabled %u VF%s\n", num_vfs, str_plural(num_vfs)); return 0; } @@ -267,3 +242,25 @@ int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) return ret; } + +/** + * xe_pci_sriov_get_vf_pdev() - Lookup the VF's PCI device using the VF identifier. + * @pdev: the PF's &pci_dev + * @vfid: VF identifier (1-based) + * + * The caller must decrement the reference count by calling pci_dev_put(). + * + * Return: the VF's &pci_dev or NULL if the VF device was not found. 
+ */ +struct pci_dev *xe_pci_sriov_get_vf_pdev(struct pci_dev *pdev, unsigned int vfid) +{ + struct xe_device *xe = pdev_to_xe_device(pdev); + + xe_assert(xe, dev_is_pf(&pdev->dev)); + xe_assert(xe, vfid); + xe_assert(xe, vfid <= pci_sriov_get_totalvfs(pdev)); + + return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + pdev->bus->number, + pci_iov_virtfn_devfn(pdev, vfid - 1)); +} diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.h b/drivers/gpu/drm/xe/xe_pci_sriov.h index c76dd0d90495..b9105d71dbb1 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.h +++ b/drivers/gpu/drm/xe/xe_pci_sriov.h @@ -10,6 +10,7 @@ struct pci_dev; #ifdef CONFIG_PCI_IOV int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs); +struct pci_dev *xe_pci_sriov_get_vf_pdev(struct pci_dev *pdev, unsigned int vfid); #else static inline int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h index 9b9766a3baa3..9892c063a9c5 100644 --- a/drivers/gpu/drm/xe/xe_pci_types.h +++ b/drivers/gpu/drm/xe/xe_pci_types.h @@ -30,36 +30,37 @@ struct xe_device_desc { u8 dma_mask_size; u8 max_remote_tiles:2; u8 max_gt_per_tile:2; + u8 va_bits; + u8 vm_max_level; + u8 vram_flags; u8 require_force_probe:1; u8 is_dgfx:1; u8 has_display:1; u8 has_fan_control:1; + u8 has_flat_ccs:1; u8 has_gsc_nvm:1; u8 has_heci_gscfi:1; u8 has_heci_cscfi:1; u8 has_late_bind:1; u8 has_llc:1; u8 has_mbx_power_limits:1; + u8 has_mem_copy_instr:1; u8 has_pxp:1; u8 has_sriov:1; u8 needs_scratch:1; u8 skip_guc_pc:1; u8 skip_mtcfg:1; u8 skip_pcode:1; + u8 needs_shared_vf_gt_wq:1; }; struct xe_graphics_desc { - u8 va_bits; - u8 vm_max_level; - u8 vram_flags; - u64 hw_engine_mask; /* hardware engines provided by graphics IP */ u8 has_asid:1; u8 has_atomic_enable_pte_bit:1; - u8 has_flat_ccs:1; u8 has_indirect_ring_state:1; u8 has_range_tlb_inval:1; u8 has_usm:1; diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c index 6a7ddb9005f9..0d33c14ea0cf 100644 --- a/drivers/gpu/drm/xe/xe_pcode.c +++ b/drivers/gpu/drm/xe/xe_pcode.c @@ -32,27 +32,39 @@ static int pcode_mailbox_status(struct xe_tile *tile) { + const char *err_str; + int err_decode; u32 err; - static const struct pcode_err_decode err_decode[] = { - [PCODE_ILLEGAL_CMD] = {-ENXIO, "Illegal Command"}, - [PCODE_TIMEOUT] = {-ETIMEDOUT, "Timed out"}, - [PCODE_ILLEGAL_DATA] = {-EINVAL, "Illegal Data"}, - [PCODE_ILLEGAL_SUBCOMMAND] = {-ENXIO, "Illegal Subcommand"}, - [PCODE_LOCKED] = {-EBUSY, "PCODE Locked"}, - [PCODE_GT_RATIO_OUT_OF_RANGE] = {-EOVERFLOW, - "GT ratio out of range"}, - [PCODE_REJECTED] = {-EACCES, "PCODE Rejected"}, - [PCODE_ERROR_MASK] = {-EPROTO, "Unknown"}, - }; + +#define CASE_ERR(_err, _err_decode, _err_str) \ + case _err: \ + err_decode = _err_decode; \ + err_str = _err_str; \ + break err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK; + switch (err) { + CASE_ERR(PCODE_ILLEGAL_CMD, -ENXIO, "Illegal Command"); + CASE_ERR(PCODE_TIMEOUT, -ETIMEDOUT, "Timed out"); + CASE_ERR(PCODE_ILLEGAL_DATA, -EINVAL, "Illegal Data"); + CASE_ERR(PCODE_ILLEGAL_SUBCOMMAND, -ENXIO, "Illegal Subcommand"); + CASE_ERR(PCODE_LOCKED, -EBUSY, "PCODE Locked"); + CASE_ERR(PCODE_GT_RATIO_OUT_OF_RANGE, -EOVERFLOW, "GT ratio out of range"); + CASE_ERR(PCODE_REJECTED, -EACCES, "PCODE Rejected"); + default: + err_decode = -EPROTO; + err_str = "Unknown"; + } + if (err) { - drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err, - err_decode[err].str ?: "Unknown"); - return 
err_decode[err].errno ?: -EPROTO; + drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", + err_decode, err_str); + + return err_decode; } return 0; +#undef CASE_ERR } static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1, diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h index 92bfcba51e19..70dcd6625680 100644 --- a/drivers/gpu/drm/xe/xe_pcode_api.h +++ b/drivers/gpu/drm/xe/xe_pcode_api.h @@ -92,9 +92,3 @@ #define BMG_PCIE_CAP XE_REG(0x138340) #define LINK_DOWNGRADE REG_GENMASK(1, 0) #define DOWNGRADE_CAPABLE 2 - -struct pcode_err_decode { - int errno; - const char *str; -}; - diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h index d08574c4cdb8..f516dbddfd88 100644 --- a/drivers/gpu/drm/xe/xe_platform_types.h +++ b/drivers/gpu/drm/xe/xe_platform_types.h @@ -24,6 +24,8 @@ enum xe_platform { XE_LUNARLAKE, XE_BATTLEMAGE, XE_PANTHERLAKE, + XE_NOVALAKE_S, + XE_CRESCENTISLAND, }; enum xe_subplatform { @@ -34,6 +36,7 @@ enum xe_subplatform { XE_SUBPLATFORM_DG2_G10, XE_SUBPLATFORM_DG2_G11, XE_SUBPLATFORM_DG2_G12, + XE_SUBPLATFORM_BATTLEMAGE_G21, }; #endif diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 2c5a44377994..44924512830f 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -83,8 +83,58 @@ static struct lockdep_map xe_pm_runtime_d3cold_map = { static struct lockdep_map xe_pm_runtime_nod3cold_map = { .name = "xe_rpm_nod3cold_map" }; + +static struct lockdep_map xe_pm_block_lockdep_map = { + .name = "xe_pm_block_map", +}; #endif +static void xe_pm_block_begin_signalling(void) +{ + lock_acquire_shared_recursive(&xe_pm_block_lockdep_map, 0, 1, NULL, _RET_IP_); +} + +static void xe_pm_block_end_signalling(void) +{ + lock_release(&xe_pm_block_lockdep_map, _RET_IP_); +} + +/** + * xe_pm_might_block_on_suspend() - Annotate that the code might block on suspend + * + * Annotation to use where the code might block or cease to make + * progress pending resume completion. + */ +void xe_pm_might_block_on_suspend(void) +{ + lock_map_acquire(&xe_pm_block_lockdep_map); + lock_map_release(&xe_pm_block_lockdep_map); +} + +/** + * xe_pm_block_on_suspend() - Block pending suspend. + * @xe: The xe device about to be suspended. + * + * Block if the pm notifier has started evicting bos, to avoid + * racing and validating those bos back. The function is + * annotated to ensure no locks are held that are also grabbed + * in the pm notifier or the device suspend / resume. + * This is intended to be used by freezable tasks only + * (not freezable workqueues), with the intention that the function + * returns %-ERESTARTSYS when tasks are frozen during suspend, + * and allows the task to freeze. The caller must be able to + * handle the %-ERESTARTSYS. + * + * Return: %0 on success, %-ERESTARTSYS on signal pending or + * if freezing requested. + */ +int xe_pm_block_on_suspend(struct xe_device *xe) +{ + xe_pm_might_block_on_suspend(); + + return wait_for_completion_interruptible(&xe->pm_block); +} + /** * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context * @xe: The xe device. 
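[Editorial sketch, not part of the patch: the hunk above adds xe_pm_block_on_suspend() for freezable task context. A minimal caller sketch of the intended usage, where xe_example_validate_ioctl() is a hypothetical entry point that revalidates BOs and propagates -ERESTARTSYS so user space restarts the call after resume:]

#include "xe_pm.h"

static int xe_example_validate_ioctl(struct xe_device *xe)
{
	int err;

	/* Back off (and allow freezing) while the PM notifier is evicting BOs. */
	err = xe_pm_block_on_suspend(xe);
	if (err)
		return err;	/* -ERESTARTSYS: caller restarts after resume */

	/* ... proceed with BO validation / binding here ... */

	return 0;
}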
@@ -124,6 +174,7 @@ int xe_pm_suspend(struct xe_device *xe) int err; drm_dbg(&xe->drm, "Suspending device\n"); + xe_pm_block_begin_signalling(); trace_xe_pm_suspend(xe, __builtin_return_address(0)); err = xe_pxp_pm_suspend(xe->pxp); @@ -155,6 +206,8 @@ int xe_pm_suspend(struct xe_device *xe) xe_i2c_pm_suspend(xe); drm_dbg(&xe->drm, "Device suspended\n"); + xe_pm_block_end_signalling(); + return 0; err_display: @@ -162,6 +215,7 @@ err_display: xe_pxp_pm_resume(xe->pxp); err: drm_dbg(&xe->drm, "Device suspend failed %d\n", err); + xe_pm_block_end_signalling(); return err; } @@ -178,6 +232,7 @@ int xe_pm_resume(struct xe_device *xe) u8 id; int err; + xe_pm_block_begin_signalling(); drm_dbg(&xe->drm, "Resuming device\n"); trace_xe_pm_resume(xe, __builtin_return_address(0)); @@ -222,9 +277,11 @@ int xe_pm_resume(struct xe_device *xe) xe_late_bind_fw_load(&xe->late_bind); drm_dbg(&xe->drm, "Device resumed\n"); + xe_pm_block_end_signalling(); return 0; err: drm_dbg(&xe->drm, "Device resume failed %d\n", err); + xe_pm_block_end_signalling(); return err; } @@ -329,9 +386,16 @@ static int xe_pm_notifier_callback(struct notifier_block *nb, switch (action) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: + { + struct xe_validation_ctx ctx; + reinit_completion(&xe->pm_block); + xe_pm_block_begin_signalling(); xe_pm_runtime_get(xe); + (void)xe_validation_ctx_init(&ctx, &xe->val, NULL, + (struct xe_val_flags) {.exclusive = true}); err = xe_bo_evict_all_user(xe); + xe_validation_ctx_fini(&ctx); if (err) drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err); @@ -343,7 +407,9 @@ static int xe_pm_notifier_callback(struct notifier_block *nb, * avoid a runtime suspend interfering with evicted objects or backup * allocations. */ + xe_pm_block_end_signalling(); break; + } case PM_POST_HIBERNATION: case PM_POST_SUSPEND: complete_all(&xe->pm_block); diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h index 59678b310e55..f7f89a18b6fc 100644 --- a/drivers/gpu/drm/xe/xe_pm.h +++ b/drivers/gpu/drm/xe/xe_pm.h @@ -33,6 +33,8 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold); void xe_pm_d3cold_allowed_toggle(struct xe_device *xe); bool xe_rpm_reclaim_safe(const struct xe_device *xe); struct task_struct *xe_pm_read_callback_task(struct xe_device *xe); +int xe_pm_block_on_suspend(struct xe_device *xe); +void xe_pm_might_block_on_suspend(void); int xe_pm_module_init(void); #endif diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c index cab51d826345..c63335eb69e5 100644 --- a/drivers/gpu/drm/xe/xe_pmu.c +++ b/drivers/gpu/drm/xe/xe_pmu.c @@ -497,7 +497,12 @@ static const struct attribute_group *pmu_events_attr_update[] = { static void set_supported_events(struct xe_pmu *pmu) { struct xe_device *xe = container_of(pmu, typeof(*xe), pmu); - struct xe_gt *gt = xe_device_get_gt(xe, 0); + struct xe_gt *gt; + int id; + + /* If there are no GTs, don't support any GT-related events */ + if (xe->info.gt_count == 0) + return; if (!xe->info.skip_guc_pc) { pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY); @@ -505,6 +510,10 @@ static void set_supported_events(struct xe_pmu *pmu) pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY); } + /* Find the first available GT to query engine event capabilities */ + for_each_gt(gt, xe, id) + break; + if (xe_guc_engine_activity_supported(>->uc.guc)) { pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS); pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_TOTAL_TICKS); diff --git 
a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c index 83fbeea5aa20..7f587ca3947d 100644 --- a/drivers/gpu/drm/xe/xe_preempt_fence.c +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c @@ -8,6 +8,8 @@ #include <linux/slab.h> #include "xe_exec_queue.h" +#include "xe_gt_printk.h" +#include "xe_guc_exec_queue_types.h" #include "xe_vm.h" static void preempt_fence_work_func(struct work_struct *w) @@ -22,6 +24,15 @@ static void preempt_fence_work_func(struct work_struct *w) } else if (!q->ops->reset_status(q)) { int err = q->ops->suspend_wait(q); + if (err == -EAGAIN) { + xe_gt_dbg(q->gt, "PREEMPT FENCE RETRY guc_id=%d", + q->guc->id); + queue_work(q->vm->xe->preempt_fence_wq, + &pfence->preempt_work); + dma_fence_end_signalling(cookie); + return; + } + if (err) dma_fence_set_error(&pfence->base, err); } else { diff --git a/drivers/gpu/drm/xe/xe_preempt_fence_types.h b/drivers/gpu/drm/xe/xe_preempt_fence_types.h index 312c3372a49f..ac125c697a41 100644 --- a/drivers/gpu/drm/xe/xe_preempt_fence_types.h +++ b/drivers/gpu/drm/xe/xe_preempt_fence_types.h @@ -12,7 +12,7 @@ struct xe_exec_queue; /** - * struct xe_preempt_fence - XE preempt fence + * struct xe_preempt_fence - Xe preempt fence * * hardware and triggers a callback once the xe_engine is complete. */ diff --git a/drivers/gpu/drm/xe/xe_psmi.c b/drivers/gpu/drm/xe/xe_psmi.c index 45d142191d60..6a54e38b81ba 100644 --- a/drivers/gpu/drm/xe/xe_psmi.c +++ b/drivers/gpu/drm/xe/xe_psmi.c @@ -70,8 +70,8 @@ static struct xe_bo *psmi_alloc_object(struct xe_device *xe, { struct xe_tile *tile; - if (!id || !bo_size) - return NULL; + xe_assert(xe, id); + xe_assert(xe, bo_size); tile = &xe->tiles[id - 1]; diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index 07f96bda638a..884127b4d97d 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -3,8 +3,6 @@ * Copyright © 2022 Intel Corporation */ -#include <linux/dma-fence-array.h> - #include "xe_pt.h" #include "regs/xe_gtt_defs.h" @@ -122,7 +120,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE | XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE; if (vm->xef) /* userspace */ - bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE; + bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM; pt->level = level; @@ -715,7 +713,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, .vm = vm, .tile = tile, .curs = &curs, - .va_curs_start = range ? range->base.itree.start : + .va_curs_start = range ? xe_svm_range_start(range) : xe_vma_start(vma), .vma = vma, .wupd.entries = entries, @@ -734,7 +732,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, } if (xe_svm_range_has_dma_mapping(range)) { xe_res_first_dma(range->base.pages.dma_addr, 0, - range->base.itree.last + 1 - range->base.itree.start, + xe_svm_range_size(range), &curs); xe_svm_range_debug(range, "BIND PREPARE - MIXED"); } else { @@ -778,8 +776,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, walk_pt: ret = xe_pt_walk_range(&pt->base, pt->level, - range ? range->base.itree.start : xe_vma_start(vma), - range ? range->base.itree.last + 1 : xe_vma_end(vma), + range ? xe_svm_range_start(range) : xe_vma_start(vma), + range ? 
xe_svm_range_end(range) : xe_vma_end(vma), &xe_walk.base); *num_entries = xe_walk.wupd.num_used_entries; @@ -975,8 +973,8 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm, if (!(pt_mask & BIT(tile->id))) return false; - (void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start, - range->base.itree.last + 1, &xe_walk.base); + (void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range), + xe_svm_range_end(range), &xe_walk.base); return xe_walk.needs_invalidate; } @@ -1340,13 +1338,6 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job, return err; } - if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) { - if (job) - err = xe_sched_job_last_fence_add_dep(job, vm); - else - err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm); - } - for (i = 0; job && !err && i < vops->num_syncs; i++) err = xe_sync_entry_add_deps(&vops->syncs[i], job); @@ -1661,8 +1652,8 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_svm_range *range, struct xe_vm_pgtable_update *entries) { - u64 start = range ? range->base.itree.start : xe_vma_start(vma); - u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma); + u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma); + u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma); struct xe_pt_stage_unbind_walk xe_walk = { .base = { .ops = &xe_pt_stage_unbind_ops, @@ -1872,7 +1863,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile, vm_dbg(&xe_vma_vm(vma)->xe->drm, "Preparing bind, with range [%lx...%lx)\n", - range->base.itree.start, range->base.itree.last); + xe_svm_range_start(range), xe_svm_range_end(range) - 1); pt_op->vma = NULL; pt_op->bind = true; @@ -1887,8 +1878,8 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile, pt_op->num_entries, true); xe_pt_update_ops_rfence_interval(pt_update_ops, - range->base.itree.start, - range->base.itree.last + 1); + xe_svm_range_start(range), + xe_svm_range_end(range)); ++pt_update_ops->current_op; pt_update_ops->needs_svm_lock = true; @@ -1983,7 +1974,7 @@ static int unbind_range_prepare(struct xe_vm *vm, vm_dbg(&vm->xe->drm, "Preparing unbind, with range [%lx...%lx)\n", - range->base.itree.start, range->base.itree.last); + xe_svm_range_start(range), xe_svm_range_end(range) - 1); pt_op->vma = XE_INVALID_VMA; pt_op->bind = false; @@ -1994,8 +1985,8 @@ static int unbind_range_prepare(struct xe_vm *vm, xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, pt_op->num_entries, false); - xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start, - range->base.itree.last + 1); + xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range), + xe_svm_range_end(range)); ++pt_update_ops->current_op; pt_update_ops->needs_svm_lock = true; pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) || @@ -2359,10 +2350,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) struct xe_vm *vm = vops->vm; struct xe_vm_pgtable_update_ops *pt_update_ops = &vops->pt_update_ops[tile->id]; - struct dma_fence *fence, *ifence, *mfence; + struct xe_exec_queue *q = pt_update_ops->q; + struct dma_fence *fence, *ifence = NULL, *mfence = NULL; struct xe_tlb_inval_job *ijob = NULL, *mjob = NULL; - struct dma_fence **fences = NULL; - struct dma_fence_array *cf = NULL; struct xe_range_fence *rfence; struct xe_vma_op *op; int err = 0, i; @@ -2390,15 +2380,14 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) #endif if 
(pt_update_ops->needs_invalidation) { - struct xe_exec_queue *q = pt_update_ops->q; struct xe_dep_scheduler *dep_scheduler = to_dep_scheduler(q, tile->primary_gt); ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval, - dep_scheduler, + dep_scheduler, vm, pt_update_ops->start, pt_update_ops->last, - vm->usm.asid); + XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT); if (IS_ERR(ijob)) { err = PTR_ERR(ijob); goto kill_vm_tile1; @@ -2410,26 +2399,15 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) mjob = xe_tlb_inval_job_create(q, &tile->media_gt->tlb_inval, - dep_scheduler, + dep_scheduler, vm, pt_update_ops->start, pt_update_ops->last, - vm->usm.asid); + XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT); if (IS_ERR(mjob)) { err = PTR_ERR(mjob); goto free_ijob; } update.mjob = mjob; - - fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL); - if (!fences) { - err = -ENOMEM; - goto free_ijob; - } - cf = dma_fence_array_alloc(2); - if (!cf) { - err = -ENOMEM; - goto free_ijob; - } } } @@ -2460,31 +2438,12 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) pt_update_ops->last, fence)) dma_fence_wait(fence, false); - /* tlb invalidation must be done before signaling unbind/rebind */ - if (ijob) { - struct dma_fence *__fence; - + if (ijob) ifence = xe_tlb_inval_job_push(ijob, tile->migrate, fence); - __fence = ifence; - - if (mjob) { - fences[0] = ifence; - mfence = xe_tlb_inval_job_push(mjob, tile->migrate, - fence); - fences[1] = mfence; - - dma_fence_array_init(cf, 2, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - __fence = &cf->base; - } - - dma_fence_put(fence); - fence = __fence; - } + if (mjob) + mfence = xe_tlb_inval_job_push(mjob, tile->migrate, fence); - if (!mjob) { + if (!mjob && !ijob) { dma_resv_add_fence(xe_vm_resv(vm), fence, pt_update_ops->wait_vm_bookkeep ? DMA_RESV_USAGE_KERNEL : @@ -2492,6 +2451,14 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) list_for_each_entry(op, &vops->list, link) op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL); + } else if (ijob && !mjob) { + dma_resv_add_fence(xe_vm_resv(vm), ifence, + pt_update_ops->wait_vm_bookkeep ? + DMA_RESV_USAGE_KERNEL : + DMA_RESV_USAGE_BOOKKEEP); + + list_for_each_entry(op, &vops->list, link) + op_commit(vops->vm, tile, pt_update_ops, op, ifence, NULL); } else { dma_resv_add_fence(xe_vm_resv(vm), ifence, pt_update_ops->wait_vm_bookkeep ? @@ -2511,16 +2478,23 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) if (pt_update_ops->needs_svm_lock) xe_svm_notifier_unlock(vm); + /* + * The last fence is only used for zero bind queue idling; migrate + * queues are not exposed to user space. + */ + if (!(q->flags & EXEC_QUEUE_FLAG_MIGRATE)) + xe_exec_queue_last_fence_set(q, vm, fence); + xe_tlb_inval_job_put(mjob); xe_tlb_inval_job_put(ijob); + dma_fence_put(ifence); + dma_fence_put(mfence); return fence; free_rfence: kfree(rfence); free_ijob: - kfree(cf); - kfree(fences); xe_tlb_inval_job_put(mjob); xe_tlb_inval_job_put(ijob); kill_vm_tile1: diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 2e9ff33ed2fe..1c0915e2cc16 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -436,7 +436,7 @@ static int query_hwconfig(struct xe_device *xe, struct drm_xe_device_query *query) { struct xe_gt *gt = xe_root_mmio_gt(xe); - size_t size = xe_guc_hwconfig_size(>->uc.guc); + size_t size = gt ? 
xe_guc_hwconfig_size(>->uc.guc) : 0; void __user *query_ptr = u64_to_user_ptr(query->data); void *hwconfig; diff --git a/drivers/gpu/drm/xe/xe_range_fence.h b/drivers/gpu/drm/xe/xe_range_fence.h index edd58b34f5c0..4934729dd904 100644 --- a/drivers/gpu/drm/xe/xe_range_fence.h +++ b/drivers/gpu/drm/xe/xe_range_fence.h @@ -13,13 +13,13 @@ struct xe_range_fence_tree; struct xe_range_fence; -/** struct xe_range_fence_ops - XE range fence ops */ +/** struct xe_range_fence_ops - Xe range fence ops */ struct xe_range_fence_ops { /** @free: free range fence op */ void (*free)(struct xe_range_fence *rfence); }; -/** struct xe_range_fence - XE range fence (address conflict tracking) */ +/** struct xe_range_fence - Xe range fence (address conflict tracking) */ struct xe_range_fence { /** @rb: RB tree node inserted into interval tree */ struct rb_node rb; diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c index 23f6c81d9994..7ca360b2c20d 100644 --- a/drivers/gpu/drm/xe/xe_reg_whitelist.c +++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c @@ -19,7 +19,8 @@ #undef XE_REG_MCR #define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1) -static bool match_not_render(const struct xe_gt *gt, +static bool match_not_render(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe) { return hwe->class != XE_ENGINE_CLASS_RENDER; @@ -88,6 +89,13 @@ static const struct xe_rtp_entry_sr register_whitelist[] = { RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4)) }, + { XE_RTP_NAME("14024997852"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(WHITELIST(FF_MODE, + RING_FORCE_TO_NONPRIV_ACCESS_RW), + WHITELIST(VFLSKPD, + RING_FORCE_TO_NONPRIV_ACCESS_RW)) + }, }; static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe) diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index d71837773d6c..ac0c6dcffe15 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -245,12 +245,14 @@ static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i) /* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc, - u64 batch_addr, u32 seqno) + u64 batch_addr, u32 *head, u32 seqno) { u32 dw[MAX_JOB_SIZE_DW], i = 0; u32 ppgtt_flag = get_ppgtt_flag(job); struct xe_gt *gt = job->q->gt; + *head = lrc->ring.tail; + i = emit_copy_timestamp(lrc, dw, i); if (job->ring_ops_flush_tlb) { @@ -296,7 +298,7 @@ static bool has_aux_ccs(struct xe_device *xe) } static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, - u64 batch_addr, u32 seqno) + u64 batch_addr, u32 *head, u32 seqno) { u32 dw[MAX_JOB_SIZE_DW], i = 0; u32 ppgtt_flag = get_ppgtt_flag(job); @@ -304,6 +306,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, struct xe_device *xe = gt_to_xe(gt); bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE; + *head = lrc->ring.tail; + i = emit_copy_timestamp(lrc, dw, i); dw[i++] = preparser_disable(true); @@ -346,7 +350,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, static void __emit_job_gen12_render_compute(struct xe_sched_job *job, struct xe_lrc *lrc, - u64 batch_addr, u32 seqno) + u64 batch_addr, u32 *head, + u32 seqno) { u32 dw[MAX_JOB_SIZE_DW], i = 0; u32 ppgtt_flag = get_ppgtt_flag(job); @@ -355,6 +360,8 @@ static void 
__emit_job_gen12_render_compute(struct xe_sched_job *job, bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK); u32 mask_flags = 0; + *head = lrc->ring.tail; + i = emit_copy_timestamp(lrc, dw, i); dw[i++] = preparser_disable(true); @@ -396,11 +403,14 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job, } static void emit_migration_job_gen12(struct xe_sched_job *job, - struct xe_lrc *lrc, u32 seqno) + struct xe_lrc *lrc, u32 *head, + u32 seqno) { u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc); u32 dw[MAX_JOB_SIZE_DW], i = 0; + *head = lrc->ring.tail; + i = emit_copy_timestamp(lrc, dw, i); i = emit_store_imm_ggtt(saddr, seqno, dw, i); @@ -434,6 +444,7 @@ static void emit_job_gen12_gsc(struct xe_sched_job *job) __emit_job_gen12_simple(job, job->q->lrc[0], job->ptrs[0].batch_addr, + &job->ptrs[0].head, xe_sched_job_lrc_seqno(job)); } @@ -443,6 +454,7 @@ static void emit_job_gen12_copy(struct xe_sched_job *job) if (xe_sched_job_is_migration(job->q)) { emit_migration_job_gen12(job, job->q->lrc[0], + &job->ptrs[0].head, xe_sched_job_lrc_seqno(job)); return; } @@ -450,6 +462,7 @@ static void emit_job_gen12_copy(struct xe_sched_job *job) for (i = 0; i < job->q->width; ++i) __emit_job_gen12_simple(job, job->q->lrc[i], job->ptrs[i].batch_addr, + &job->ptrs[i].head, xe_sched_job_lrc_seqno(job)); } @@ -461,6 +474,7 @@ static void emit_job_gen12_video(struct xe_sched_job *job) for (i = 0; i < job->q->width; ++i) __emit_job_gen12_video(job, job->q->lrc[i], job->ptrs[i].batch_addr, + &job->ptrs[i].head, xe_sched_job_lrc_seqno(job)); } @@ -471,6 +485,7 @@ static void emit_job_gen12_render_compute(struct xe_sched_job *job) for (i = 0; i < job->q->width; ++i) __emit_job_gen12_render_compute(job, job->q->lrc[i], job->ptrs[i].batch_addr, + &job->ptrs[i].head, xe_sched_job_lrc_seqno(job)); } diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index b5f430d59f80..ed509b1c8cfc 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -133,10 +133,7 @@ static bool rule_matches(const struct xe_device *xe, match = hwe->class != r->engine_class; break; case XE_RTP_MATCH_FUNC: - if (drm_WARN_ON(&xe->drm, !gt)) - return false; - - match = r->match_func(gt, hwe); + match = r->match_func(xe, gt, hwe); break; default: drm_warn(&xe->drm, "Invalid RTP match %u\n", @@ -343,13 +340,15 @@ void xe_rtp_process(struct xe_rtp_process_ctx *ctx, } EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process); -bool xe_rtp_match_even_instance(const struct xe_gt *gt, +bool xe_rtp_match_even_instance(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe) { return hwe->instance % 2 == 0; } -bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt, +bool xe_rtp_match_first_render_or_compute(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe) { u64 render_compute_mask = gt->info.engine_mask & @@ -359,20 +358,30 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt, hwe->engine_id == __ffs(render_compute_mask); } -bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt, +bool xe_rtp_match_not_sriov_vf(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe) { - return !IS_SRIOV_VF(gt_to_xe(gt)); + return !IS_SRIOV_VF(xe); } -bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt, +bool xe_rtp_match_psmi_enabled(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe) { - return xe_configfs_get_psmi_enabled(to_pci_dev(gt_to_xe(gt)->drm.dev)); + return 
xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)); } -bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt, +bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe) { return xe_gt_has_discontiguous_dss_groups(gt); } + +bool xe_rtp_match_has_flat_ccs(const struct xe_device *xe, + const struct xe_gt *gt, + const struct xe_hw_engine *hwe) +{ + return xe->info.has_flat_ccs; +} diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h index ac12ddf6cde6..ba5f940c0a96 100644 --- a/drivers/gpu/drm/xe/xe_rtp.h +++ b/drivers/gpu/drm/xe/xe_rtp.h @@ -440,18 +440,21 @@ void xe_rtp_process(struct xe_rtp_process_ctx *ctx, /** * xe_rtp_match_even_instance - Match if engine instance is even + * @xe: Device structure * @gt: GT structure * @hwe: Engine instance * * Returns: true if engine instance is even, false otherwise */ -bool xe_rtp_match_even_instance(const struct xe_gt *gt, +bool xe_rtp_match_even_instance(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe); /* * xe_rtp_match_first_render_or_compute - Match if it's first render or compute * engine in the GT * + * @xe: Device structure * @gt: GT structure * @hwe: Engine instance * @@ -463,24 +466,41 @@ bool xe_rtp_match_even_instance(const struct xe_gt *gt, * Returns: true if engine id is the first to match the render reset domain, * false otherwise. */ -bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt, +bool xe_rtp_match_first_render_or_compute(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe); /* * xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device * + * @xe: Device structure * @gt: GT structure * @hwe: Engine instance * * Returns: true if device is not VF, false otherwise. 
*/ -bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt, +bool xe_rtp_match_not_sriov_vf(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe); -bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt, +bool xe_rtp_match_psmi_enabled(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe); -bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt, +bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe); +/** + * xe_rtp_match_has_flat_ccs - Match when platform has FlatCCS compression + * @xe: Device structure + * @gt: GT structure + * @hwe: Engine instance + * + * Returns: true if platform has FlatCCS compression, false otherwise + */ +bool xe_rtp_match_has_flat_ccs(const struct xe_device *xe, + const struct xe_gt *gt, + const struct xe_hw_engine *hwe); + #endif diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h index f4cf30e298cf..6ba7f226c227 100644 --- a/drivers/gpu/drm/xe/xe_rtp_types.h +++ b/drivers/gpu/drm/xe/xe_rtp_types.h @@ -10,6 +10,7 @@ #include "regs/xe_reg_defs.h" +struct xe_device; struct xe_hw_engine; struct xe_gt; @@ -86,7 +87,8 @@ struct xe_rtp_rule { u8 engine_class; }; /* MATCH_FUNC */ - bool (*match_func)(const struct xe_gt *gt, + bool (*match_func)(const struct xe_device *xe, + const struct xe_gt *gt, const struct xe_hw_engine *hwe); }; }; diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c index fedd017d6dd3..63a5263dcf1b 100644 --- a/drivers/gpu/drm/xe/xe_sa.c +++ b/drivers/gpu/drm/xe/xe_sa.c @@ -110,6 +110,10 @@ struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0); } +/** + * xe_sa_bo_flush_write() - Copy the data from the sub-allocation to the GPU memory. + * @sa_bo: the &drm_suballoc to flush + */ void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo) { struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager); @@ -123,6 +127,23 @@ void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo) drm_suballoc_size(sa_bo)); } +/** + * xe_sa_bo_sync_read() - Copy the data from GPU memory to the sub-allocation. 
+ * @sa_bo: the &drm_suballoc to sync + */ +void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo) +{ + struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager); + struct xe_device *xe = tile_to_xe(sa_manager->bo->tile); + + if (!sa_manager->bo->vmap.is_iomem) + return; + + xe_map_memcpy_from(xe, xe_sa_bo_cpu_addr(sa_bo), &sa_manager->bo->vmap, + drm_suballoc_soffset(sa_bo), + drm_suballoc_size(sa_bo)); +} + void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence) { diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h index 99dbf0eea540..1be744350836 100644 --- a/drivers/gpu/drm/xe/xe_sa.h +++ b/drivers/gpu/drm/xe/xe_sa.h @@ -37,6 +37,7 @@ static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager } void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo); +void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo); void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence); static inline struct xe_sa_manager * diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index d21bf8f26964..cb674a322113 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -146,6 +146,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q, for (i = 0; i < width; ++i) job->ptrs[i].batch_addr = batch_addr[i]; + atomic_inc(&q->job_cnt); xe_pm_runtime_get_noresume(job_to_xe(job)); trace_xe_sched_job_create(job); return job; @@ -160,11 +161,11 @@ err_free: } /** - * xe_sched_job_destroy - Destroy XE schedule job - * @ref: reference to XE schedule job + * xe_sched_job_destroy - Destroy Xe schedule job + * @ref: reference to Xe schedule job * * Called when ref == 0, drop a reference to job's xe_engine + fence, cleanup - * base DRM schedule job, and free memory for XE schedule job. + * base DRM schedule job, and free memory for Xe schedule job. */ void xe_sched_job_destroy(struct kref *ref) { @@ -177,6 +178,7 @@ void xe_sched_job_destroy(struct kref *ref) dma_fence_put(job->fence); drm_sched_job_cleanup(&job->drm); job_free(job); + atomic_dec(&q->job_cnt); xe_exec_queue_put(q); xe_pm_runtime_put(xe); } @@ -296,23 +298,6 @@ void xe_sched_job_push(struct xe_sched_job *job) } /** - * xe_sched_job_last_fence_add_dep - Add last fence dependency to job - * @job:job to add the last fence dependency to - * @vm: virtual memory job belongs to - * - * Returns: - * 0 on success, or an error on failing to expand the array. 
- */ -int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) -{ - struct dma_fence *fence; - - fence = xe_exec_queue_last_fence_get(job->q, vm); - - return drm_sched_job_add_dependency(&job->drm, fence); -} - -/** * xe_sched_job_init_user_fence - Initialize user_fence for the job * @job: job whose user_fence needs an init * @sync: sync to be use to init user_fence diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h index 3dc72c5c1f13..1c1cb44216c3 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.h +++ b/drivers/gpu/drm/xe/xe_sched_job.h @@ -23,10 +23,10 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q, void xe_sched_job_destroy(struct kref *ref); /** - * xe_sched_job_get - get reference to XE schedule job - * @job: XE schedule job object + * xe_sched_job_get - get reference to Xe schedule job + * @job: Xe schedule job object * - * Increment XE schedule job's reference count + * Increment Xe schedule job's reference count */ static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job) { @@ -35,10 +35,10 @@ static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job) } /** - * xe_sched_job_put - put reference to XE schedule job - * @job: XE schedule job object + * xe_sched_job_put - put reference to Xe schedule job + * @job: Xe schedule job object * - * Decrement XE schedule job's reference count, call xe_sched_job_destroy when + * Decrement Xe schedule job's reference count, call xe_sched_job_destroy when * reference count == 0. */ static inline void xe_sched_job_put(struct xe_sched_job *job) @@ -58,7 +58,6 @@ bool xe_sched_job_completed(struct xe_sched_job *job); void xe_sched_job_arm(struct xe_sched_job *job); void xe_sched_job_push(struct xe_sched_job *job); -int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm); void xe_sched_job_init_user_fence(struct xe_sched_job *job, struct xe_sync_entry *sync); diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h index dbf260dded8d..d26612abb4ca 100644 --- a/drivers/gpu/drm/xe/xe_sched_job_types.h +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h @@ -24,10 +24,15 @@ struct xe_job_ptrs { struct dma_fence_chain *chain_fence; /** @batch_addr: Batch buffer address. */ u64 batch_addr; + /** + * @head: The tail pointer of the LRC (so head pointer of job) when the + * job was submitted + */ + u32 head; }; /** - * struct xe_sched_job - XE schedule job (batch buffer tracking) + * struct xe_sched_job - Xe schedule job (batch buffer tracking) */ struct xe_sched_job { /** @drm: base DRM scheduler job */ @@ -58,6 +63,10 @@ struct xe_sched_job { bool ring_ops_flush_tlb; /** @ggtt: mapped in ggtt. */ bool ggtt; + /** @skip_emit: skip emitting the job */ + bool skip_emit; + /** @last_replay: last job being replayed */ + bool last_replay; /** @ptrs: per instance pointers. 
*/ struct xe_job_ptrs ptrs[]; }; diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c index 7d2d6de2aabf..ea411944609b 100644 --- a/drivers/gpu/drm/xe/xe_sriov.c +++ b/drivers/gpu/drm/xe/xe_sriov.c @@ -167,6 +167,8 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size) */ int xe_sriov_init_late(struct xe_device *xe) { + if (IS_SRIOV_PF(xe)) + return xe_sriov_pf_init_late(xe); if (IS_SRIOV_VF(xe)) return xe_sriov_vf_init_late(xe); diff --git a/drivers/gpu/drm/xe/xe_sriov_packet.c b/drivers/gpu/drm/xe/xe_sriov_packet.c new file mode 100644 index 000000000000..bab994696896 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_packet.c @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include "xe_bo.h" +#include "xe_device.h" +#include "xe_guc_klv_helpers.h" +#include "xe_printk.h" +#include "xe_sriov_packet.h" +#include "xe_sriov_packet_types.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration.h" +#include "xe_sriov_printk.h" + +static struct mutex *pf_migration_mutex(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + + return &xe->sriov.pf.vfs[vfid].migration.lock; +} + +static struct xe_sriov_packet **pf_pick_pending(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + lockdep_assert_held(pf_migration_mutex(xe, vfid)); + + return &xe->sriov.pf.vfs[vfid].migration.pending; +} + +static struct xe_sriov_packet ** +pf_pick_descriptor(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + lockdep_assert_held(pf_migration_mutex(xe, vfid)); + + return &xe->sriov.pf.vfs[vfid].migration.descriptor; +} + +static struct xe_sriov_packet **pf_pick_trailer(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + lockdep_assert_held(pf_migration_mutex(xe, vfid)); + + return &xe->sriov.pf.vfs[vfid].migration.trailer; +} + +static struct xe_sriov_packet **pf_pick_read_packet(struct xe_device *xe, + unsigned int vfid) +{ + struct xe_sriov_packet **data; + + data = pf_pick_descriptor(xe, vfid); + if (*data) + return data; + + data = pf_pick_pending(xe, vfid); + if (!*data) + *data = xe_sriov_pf_migration_save_consume(xe, vfid); + if (*data) + return data; + + data = pf_pick_trailer(xe, vfid); + if (*data) + return data; + + return NULL; +} + +static bool pkt_needs_bo(struct xe_sriov_packet *data) +{ + return data->hdr.type == XE_SRIOV_PACKET_TYPE_VRAM; +} + +/** + * xe_sriov_packet_alloc() - Allocate migration data packet + * @xe: the &xe_device + * + * Only allocates the "outer" structure, without initializing the migration + * data backing storage. + * + * Return: Pointer to &xe_sriov_packet on success, + * NULL in case of error. + */ +struct xe_sriov_packet *xe_sriov_packet_alloc(struct xe_device *xe) +{ + struct xe_sriov_packet *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + data->xe = xe; + data->hdr_remaining = sizeof(data->hdr); + + return data; +} + +/** + * xe_sriov_packet_free() - Free migration data packet. 
+ * @data: the &xe_sriov_packet + */ +void xe_sriov_packet_free(struct xe_sriov_packet *data) +{ + if (IS_ERR_OR_NULL(data)) + return; + + if (pkt_needs_bo(data)) + xe_bo_unpin_map_no_vm(data->bo); + else + kvfree(data->buff); + + kfree(data); +} + +static int pkt_init(struct xe_sriov_packet *data) +{ + struct xe_gt *gt = xe_device_get_gt(data->xe, data->hdr.gt_id); + + if (!gt) + return -EINVAL; + + if (data->hdr.size == 0) + return 0; + + if (pkt_needs_bo(data)) { + struct xe_bo *bo; + + bo = xe_bo_create_pin_map_novm(data->xe, gt->tile, PAGE_ALIGN(data->hdr.size), + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED, false); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + data->bo = bo; + data->vaddr = bo->vmap.vaddr; + } else { + void *buff = kvzalloc(data->hdr.size, GFP_KERNEL); + + if (!buff) + return -ENOMEM; + + data->buff = buff; + data->vaddr = buff; + } + + return 0; +} + +#define XE_SRIOV_PACKET_SUPPORTED_VERSION 1 + +/** + * xe_sriov_packet_init() - Initialize migration packet header and backing storage. + * @data: the &xe_sriov_packet + * @tile_id: tile identifier + * @gt_id: GT identifier + * @type: &xe_sriov_packet_type + * @offset: offset of data packet payload (within wider resource) + * @size: size of data packet payload + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id, + enum xe_sriov_packet_type type, loff_t offset, size_t size) +{ + data->hdr.version = XE_SRIOV_PACKET_SUPPORTED_VERSION; + data->hdr.type = type; + data->hdr.tile_id = tile_id; + data->hdr.gt_id = gt_id; + data->hdr.offset = offset; + data->hdr.size = size; + data->remaining = size; + + return pkt_init(data); +} + +/** + * xe_sriov_packet_init_from_hdr() - Initialize migration packet backing storage based on header. + * @data: the &xe_sriov_packet + * + * Header data is expected to be filled prior to calling this function. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data) +{ + xe_assert(data->xe, !data->hdr_remaining); + + if (data->hdr.version != XE_SRIOV_PACKET_SUPPORTED_VERSION) + return -EINVAL; + + data->remaining = data->hdr.size; + + return pkt_init(data); +} + +static ssize_t pkt_hdr_read(struct xe_sriov_packet *data, + char __user *buf, size_t len) +{ + loff_t offset = sizeof(data->hdr) - data->hdr_remaining; + + if (!data->hdr_remaining) + return -EINVAL; + + if (len > data->hdr_remaining) + len = data->hdr_remaining; + + if (copy_to_user(buf, (void *)&data->hdr + offset, len)) + return -EFAULT; + + data->hdr_remaining -= len; + + return len; +} + +static ssize_t pkt_data_read(struct xe_sriov_packet *data, + char __user *buf, size_t len) +{ + if (len > data->remaining) + len = data->remaining; + + if (copy_to_user(buf, data->vaddr + (data->hdr.size - data->remaining), len)) + return -EFAULT; + + data->remaining -= len; + + return len; +} + +static ssize_t pkt_read_single(struct xe_sriov_packet **data, + unsigned int vfid, char __user *buf, size_t len) +{ + ssize_t copied = 0; + + if ((*data)->hdr_remaining) + copied = pkt_hdr_read(*data, buf, len); + else + copied = pkt_data_read(*data, buf, len); + + if ((*data)->remaining == 0 && (*data)->hdr_remaining == 0) { + xe_sriov_packet_free(*data); + *data = NULL; + } + + return copied; +} + +/** + * xe_sriov_packet_read_single() - Read migration data from a single packet. 
+ * @xe: the &xe_device + * @vfid: the VF identifier + * @buf: start address of userspace buffer + * @len: requested read size from userspace + * + * Return: number of bytes that has been successfully read, + * 0 if no more migration data is available, + * -errno on failure. + */ +ssize_t xe_sriov_packet_read_single(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len) +{ + struct xe_sriov_packet **data = pf_pick_read_packet(xe, vfid); + + if (!data) + return -ENODATA; + if (IS_ERR(*data)) + return PTR_ERR(*data); + + return pkt_read_single(data, vfid, buf, len); +} + +static ssize_t pkt_hdr_write(struct xe_sriov_packet *data, + const char __user *buf, size_t len) +{ + loff_t offset = sizeof(data->hdr) - data->hdr_remaining; + int ret; + + if (len > data->hdr_remaining) + len = data->hdr_remaining; + + if (copy_from_user((void *)&data->hdr + offset, buf, len)) + return -EFAULT; + + data->hdr_remaining -= len; + + if (!data->hdr_remaining) { + ret = xe_sriov_packet_init_from_hdr(data); + if (ret) + return ret; + } + + return len; +} + +static ssize_t pkt_data_write(struct xe_sriov_packet *data, + const char __user *buf, size_t len) +{ + if (len > data->remaining) + len = data->remaining; + + if (copy_from_user(data->vaddr + (data->hdr.size - data->remaining), buf, len)) + return -EFAULT; + + data->remaining -= len; + + return len; +} + +/** + * xe_sriov_packet_write_single() - Write migration data to a single packet. + * @xe: the &xe_device + * @vfid: the VF identifier + * @buf: start address of userspace buffer + * @len: requested write size from userspace + * + * Return: number of bytes that has been successfully written, + * -errno on failure. + */ +ssize_t xe_sriov_packet_write_single(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len) +{ + struct xe_sriov_packet **data = pf_pick_pending(xe, vfid); + int ret; + ssize_t copied; + + if (IS_ERR_OR_NULL(*data)) { + *data = xe_sriov_packet_alloc(xe); + if (!*data) + return -ENOMEM; + } + + if ((*data)->hdr_remaining) + copied = pkt_hdr_write(*data, buf, len); + else + copied = pkt_data_write(*data, buf, len); + + if ((*data)->hdr_remaining == 0 && (*data)->remaining == 0) { + ret = xe_sriov_pf_migration_restore_produce(xe, vfid, *data); + if (ret) { + xe_sriov_packet_free(*data); + return ret; + } + + *data = NULL; + } + + return copied; +} + +#define MIGRATION_KLV_DEVICE_DEVID_KEY 0xf001u +#define MIGRATION_KLV_DEVICE_DEVID_LEN 1u +#define MIGRATION_KLV_DEVICE_REVID_KEY 0xf002u +#define MIGRATION_KLV_DEVICE_REVID_LEN 1u + +#define MIGRATION_DESCRIPTOR_DWORDS (GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_DEVID_LEN + \ + GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_REVID_LEN) +static size_t pf_descriptor_init(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_packet **desc = pf_pick_descriptor(xe, vfid); + struct xe_sriov_packet *data; + unsigned int len = 0; + u32 *klvs; + int ret; + + data = xe_sriov_packet_alloc(xe); + if (!data) + return -ENOMEM; + + ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_DESCRIPTOR, + 0, MIGRATION_DESCRIPTOR_DWORDS * sizeof(u32)); + if (ret) { + xe_sriov_packet_free(data); + return ret; + } + + klvs = data->vaddr; + klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_DEVID_KEY, + MIGRATION_KLV_DEVICE_DEVID_LEN); + klvs[len++] = xe->info.devid; + klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_REVID_KEY, + MIGRATION_KLV_DEVICE_REVID_LEN); + klvs[len++] = xe->info.revid; + + xe_assert(xe, len == MIGRATION_DESCRIPTOR_DWORDS); + + *desc = data; + + 
return 0; +} + +/** + * xe_sriov_packet_process_descriptor() - Process migration data descriptor packet. + * @xe: the &xe_device + * @vfid: the VF identifier + * @data: the &xe_sriov_packet containing the descriptor + * + * The descriptor uses the same KLV format as GuC, and contains metadata used for + * checking migration data compatibility. + * + * Return: 0 on success, -errno on failure. + */ +int xe_sriov_packet_process_descriptor(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_packet *data) +{ + u32 num_dwords = data->hdr.size / sizeof(u32); + u32 *klvs = data->vaddr; + + xe_assert(xe, data->hdr.type == XE_SRIOV_PACKET_TYPE_DESCRIPTOR); + + if (data->hdr.size % sizeof(u32)) { + xe_sriov_warn(xe, "Aborting migration, descriptor not in KLV format (size=%llu)\n", + data->hdr.size); + return -EINVAL; + } + + while (num_dwords >= GUC_KLV_LEN_MIN) { + u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]); + u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]); + + klvs += GUC_KLV_LEN_MIN; + num_dwords -= GUC_KLV_LEN_MIN; + + if (len > num_dwords) { + xe_sriov_warn(xe, "Aborting migration, truncated KLV %#x, len %u\n", + key, len); + return -EINVAL; + } + + switch (key) { + case MIGRATION_KLV_DEVICE_DEVID_KEY: + if (*klvs != xe->info.devid) { + xe_sriov_warn(xe, + "Aborting migration, devid mismatch %#06x!=%#06x\n", + *klvs, xe->info.devid); + return -ENODEV; + } + break; + case MIGRATION_KLV_DEVICE_REVID_KEY: + if (*klvs != xe->info.revid) { + xe_sriov_warn(xe, + "Aborting migration, revid mismatch %#06x!=%#06x\n", + *klvs, xe->info.revid); + return -ENODEV; + } + break; + default: + xe_sriov_dbg(xe, + "Skipping unknown migration KLV %#x, len=%u\n", + key, len); + print_hex_dump_bytes("desc: ", DUMP_PREFIX_OFFSET, klvs, + min(SZ_64, len * sizeof(u32))); + break; + } + + klvs += len; + num_dwords -= len; + } + + return 0; +} + +static void pf_pending_init(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_packet **data = pf_pick_pending(xe, vfid); + + *data = NULL; +} + +#define MIGRATION_TRAILER_SIZE 0 +static int pf_trailer_init(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_packet **trailer = pf_pick_trailer(xe, vfid); + struct xe_sriov_packet *data; + int ret; + + data = xe_sriov_packet_alloc(xe); + if (!data) + return -ENOMEM; + + ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_TRAILER, + 0, MIGRATION_TRAILER_SIZE); + if (ret) { + xe_sriov_packet_free(data); + return ret; + } + + *trailer = data; + + return 0; +} + +/** + * xe_sriov_packet_save_init() - Initialize the pending save migration packets. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * Return: 0 on success, -errno on failure. 
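+ *
+ * For illustration, the descriptor emitted by pf_descriptor_init() above
+ * carries two KLVs laid out as:
+ *
+ *   dw[0]: MIGRATION_KLV_DEVICE_DEVID_KEY, len 1
+ *   dw[1]: xe->info.devid
+ *   dw[2]: MIGRATION_KLV_DEVICE_REVID_KEY, len 1
+ *   dw[3]: xe->info.revid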
+ */ +int xe_sriov_packet_save_init(struct xe_device *xe, unsigned int vfid) +{ + int ret; + + scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) { + ret = pf_descriptor_init(xe, vfid); + if (ret) + return ret; + + ret = pf_trailer_init(xe, vfid); + if (ret) + return ret; + + pf_pending_init(xe, vfid); + } + + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_sriov_packet.h b/drivers/gpu/drm/xe/xe_sriov_packet.h new file mode 100644 index 000000000000..2731e52cf7ef --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_packet.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PACKET_H_ +#define _XE_SRIOV_PACKET_H_ + +#include <linux/types.h> + +struct xe_device; +struct xe_sriov_packet; +enum xe_sriov_packet_type; + +struct xe_sriov_packet *xe_sriov_packet_alloc(struct xe_device *xe); +void xe_sriov_packet_free(struct xe_sriov_packet *data); + +int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id, + enum xe_sriov_packet_type, loff_t offset, size_t size); +int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data); + +ssize_t xe_sriov_packet_read_single(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len); +ssize_t xe_sriov_packet_write_single(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len); +int xe_sriov_packet_save_init(struct xe_device *xe, unsigned int vfid); +int xe_sriov_packet_process_descriptor(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_packet *data); + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_packet_types.h b/drivers/gpu/drm/xe/xe_sriov_packet_types.h new file mode 100644 index 000000000000..078a1c95e786 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_packet_types.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PACKET_TYPES_H_ +#define _XE_SRIOV_PACKET_TYPES_H_ + +#include <linux/types.h> + +/** + * enum xe_sriov_packet_type - Xe SR-IOV VF migration data packet type + * @XE_SRIOV_PACKET_TYPE_DESCRIPTOR: Descriptor with VF device metadata + * @XE_SRIOV_PACKET_TYPE_TRAILER: Trailer indicating end-of-stream + * @XE_SRIOV_PACKET_TYPE_GGTT: Global GTT migration data + * @XE_SRIOV_PACKET_TYPE_MMIO: MMIO registers migration data + * @XE_SRIOV_PACKET_TYPE_GUC: GuC firmware migration data + * @XE_SRIOV_PACKET_TYPE_VRAM: VRAM migration data + */ +enum xe_sriov_packet_type { + /* Skipping 0 to catch uninitialized data */ + XE_SRIOV_PACKET_TYPE_DESCRIPTOR = 1, + XE_SRIOV_PACKET_TYPE_TRAILER, + XE_SRIOV_PACKET_TYPE_GGTT, + XE_SRIOV_PACKET_TYPE_MMIO, + XE_SRIOV_PACKET_TYPE_GUC, + XE_SRIOV_PACKET_TYPE_VRAM, +}; + +/** + * struct xe_sriov_packet_hdr - Xe SR-IOV VF migration data packet header + */ +struct xe_sriov_packet_hdr { + /** @version: migration data protocol version */ + u8 version; + /** @type: migration data type */ + u8 type; + /** @tile_id: migration data tile id */ + u8 tile_id; + /** @gt_id: migration data gt id */ + u8 gt_id; + /** @flags: migration data flags */ + u32 flags; + /** + * @offset: offset into the resource; + * used when multiple packets of given type are used for migration + */ + u64 offset; + /** @size: migration data size */ + u64 size; +} __packed; + +/** + * struct xe_sriov_packet - Xe SR-IOV VF migration data packet + */ +struct xe_sriov_packet { + /** @xe: the PF &xe_device this data packet belongs to */ + struct xe_device *xe; + /** @vaddr: CPU pointer to payload data */ + void *vaddr; + /** 
@remaining: payload data remaining */ + size_t remaining; + /** @hdr_remaining: header data remaining */ + size_t hdr_remaining; + union { + /** @bo: Buffer object with migration data */ + struct xe_bo *bo; + /** @buff: Buffer with migration data */ + void *buff; + }; + /** @hdr: data packet header */ + struct xe_sriov_packet_hdr hdr; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c index 27ddf3cc80e9..7c779d63179f 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf.c @@ -8,17 +8,22 @@ #include <drm/drm_managed.h> #include "xe_assert.h" +#include "xe_configfs.h" #include "xe_device.h" #include "xe_gt_sriov_pf.h" #include "xe_module.h" #include "xe_sriov.h" #include "xe_sriov_pf.h" #include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration.h" #include "xe_sriov_pf_service.h" +#include "xe_sriov_pf_sysfs.h" #include "xe_sriov_printk.h" static unsigned int wanted_max_vfs(struct xe_device *xe) { + if (IS_ENABLED(CONFIG_CONFIGFS_FS)) + return xe_configfs_get_max_vfs(to_pci_dev(xe->drm.dev)); return xe_modparam.max_vfs; } @@ -98,12 +103,47 @@ int xe_sriov_pf_init_early(struct xe_device *xe) if (err) return err; + err = xe_sriov_pf_migration_init(xe); + if (err) + return err; + + xe_guard_init(&xe->sriov.pf.guard_vfs_enabling, "vfs_enabling"); + xe_sriov_pf_service_init(xe); return 0; } /** + * xe_sriov_pf_init_late() - Late initialization of the SR-IOV PF. + * @xe: the &xe_device to initialize + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_init_late(struct xe_device *xe) +{ + struct xe_gt *gt; + unsigned int id; + int err; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_init(gt); + if (err) + return err; + } + + err = xe_sriov_pf_sysfs_init(xe); + if (err) + return err; + + return 0; +} + +/** * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate. * @xe: the &xe_device to test * @@ -130,61 +170,114 @@ int xe_sriov_pf_wait_ready(struct xe_device *xe) } /** - * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information. - * @xe: the &xe_device to print info from - * @p: the &drm_printer + * xe_sriov_pf_arm_guard() - Arm the guard for exclusive/lockdown mode. + * @xe: the PF &xe_device + * @guard: the &xe_guard to arm + * @lockdown: arm for lockdown(true) or exclusive(false) mode + * @who: the address of the new owner, or NULL if it's a caller * - * Print SR-IOV PF related information into provided DRM printer. + * This function can only be called on PF. + * + * It is a simple wrapper for xe_guard_arm() with additional debug + * messages. + * + * Return: 0 on success or a negative error code on failure. 
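+ *
+ * A minimal usage sketch, pairing with xe_sriov_pf_disarm_guard() on the
+ * vfs_enabling guard initialized in xe_sriov_pf_init_early(); a NULL @who
+ * records the immediate caller as owner:
+ *
+ *   err = xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling,
+ *                               true, NULL);
+ *   if (!err) {
+ *           ...   // VF enabling is blocked here
+ *           xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling,
+ *                                    true, NULL);
+ *   }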
*/ -void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p) +int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard, + bool lockdown, void *who) { - struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + void *new_owner = who ?: __builtin_return_address(0); + int err; - xe_assert(xe, IS_SRIOV_PF(xe)); + err = xe_guard_arm(guard, lockdown, new_owner); + if (err) { + xe_sriov_dbg(xe, "%s/%s mode denied (%pe) last owner %ps\n", + guard->name, xe_guard_mode_str(lockdown), + ERR_PTR(err), guard->owner); + return err; + } - drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs); - drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs); - drm_printf(p, "enabled: %u\n", pci_num_vf(pdev)); + xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n", + guard->name, xe_guard_mode_str(lockdown), + new_owner); + return 0; } -static int simple_show(struct seq_file *m, void *data) +/** + * xe_sriov_pf_disarm_guard() - Disarm the guard. + * @xe: the PF &xe_device + * @guard: the &xe_guard to disarm + * @lockdown: disarm from lockdown(true) or exclusive(false) mode + * @who: the address of the indirect owner, or NULL if it's a caller + * + * This function can only be called on PF. + * + * It is a simple wrapper for xe_guard_disarm() with additional debug + * messages and xe_assert() to easily catch any illegal calls. + */ +void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard, + bool lockdown, void *who) { - struct drm_printer p = drm_seq_file_printer(m); - struct drm_info_node *node = m->private; - struct dentry *parent = node->dent->d_parent; - struct xe_device *xe = parent->d_inode->i_private; - void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data; + bool disarmed; - print(xe, &p); - return 0; + xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n", + guard->name, xe_guard_mode_str(lockdown), + who ?: __builtin_return_address(0)); + + disarmed = xe_guard_disarm(guard, lockdown); + xe_assert_msg(xe, disarmed, "%s/%s not armed? last owner %ps", + guard->name, xe_guard_mode_str(lockdown), guard->owner); } -static const struct drm_info_list debugfs_list[] = { - { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary }, - { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions }, -}; +/** + * xe_sriov_pf_lockdown() - Lockdown the PF to prevent VFs enabling. + * @xe: the PF &xe_device + * + * This function can only be called on PF. + * + * Once the PF is locked down, it will not enable VFs. + * If VFs are already enabled, the -EBUSY will be returned. + * To allow the PF enable VFs again call xe_sriov_pf_end_lockdown(). + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_lockdown(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + + return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true, + __builtin_return_address(0)); +} /** - * xe_sriov_pf_debugfs_register - Register PF debugfs attributes. - * @xe: the &xe_device - * @root: the root &dentry + * xe_sriov_pf_end_lockdown() - Allow the PF to enable VFs again. + * @xe: the PF &xe_device * - * Prepare debugfs attributes exposed by the PF. + * This function can only be called on PF. + * See xe_sriov_pf_lockdown() for details. */ -void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root) +void xe_sriov_pf_end_lockdown(struct xe_device *xe) { - struct drm_minor *minor = xe->drm.primary; - struct dentry *parent; - - /* - * /sys/kernel/debug/dri/0/ - * ├── pf - * │ ├── ... 
- */ - parent = debugfs_create_dir("pf", root); - if (IS_ERR(parent)) - return; - parent->d_inode->i_private = xe; - - drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), parent, minor); + xe_assert(xe, IS_SRIOV_PF(xe)); + + xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true, + __builtin_return_address(0)); +} + +/** + * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information. + * @xe: the &xe_device to print info from + * @p: the &drm_printer + * + * Print SR-IOV PF related information into provided DRM printer. + */ +void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + + xe_assert(xe, IS_SRIOV_PF(xe)); + + drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs); + drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs); + drm_printf(p, "enabled: %u\n", pci_num_vf(pdev)); } diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h index e3b34f8f5e04..b4d050ad5b7c 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf.h @@ -15,23 +15,17 @@ struct xe_device; #ifdef CONFIG_PCI_IOV bool xe_sriov_pf_readiness(struct xe_device *xe); int xe_sriov_pf_init_early(struct xe_device *xe); +int xe_sriov_pf_init_late(struct xe_device *xe); int xe_sriov_pf_wait_ready(struct xe_device *xe); -void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root); +int xe_sriov_pf_lockdown(struct xe_device *xe); +void xe_sriov_pf_end_lockdown(struct xe_device *xe); void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p); #else -static inline bool xe_sriov_pf_readiness(struct xe_device *xe) -{ - return false; -} - -static inline int xe_sriov_pf_init_early(struct xe_device *xe) -{ - return 0; -} - -static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root) -{ -} +static inline bool xe_sriov_pf_readiness(struct xe_device *xe) { return false; } +static inline int xe_sriov_pf_init_early(struct xe_device *xe) { return 0; } +static inline int xe_sriov_pf_init_late(struct xe_device *xe) { return 0; } +static inline int xe_sriov_pf_lockdown(struct xe_device *xe) { return 0; } +static inline void xe_sriov_pf_end_lockdown(struct xe_device *xe) { } #endif #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_sriov_pf_control.c new file mode 100644 index 000000000000..ed4b9820b06e --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include "xe_device.h" +#include "xe_gt_sriov_pf_control.h" +#include "xe_gt_sriov_pf_migration.h" +#include "xe_sriov_packet.h" +#include "xe_sriov_pf_control.h" +#include "xe_sriov_printk.h" + +/** + * xe_sriov_pf_control_pause_vf() - Pause a VF on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier (can't be 0 == PFID) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_control_pause_vf(gt, vfid); + result = result ? -EUCLEAN : err; + } + + if (result) + return result; + + xe_sriov_info(xe, "VF%u paused!\n", vfid); + return 0; +} + +/** + * xe_sriov_pf_control_resume_vf() - Resume a VF on all GTs. 
+ * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_control_resume_vf(gt, vfid); + result = result ? -EUCLEAN : err; + } + + if (result) + return result; + + xe_sriov_info(xe, "VF%u resumed!\n", vfid); + return 0; +} + +/** + * xe_sriov_pf_control_stop_vf - Stop a VF on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_control_stop_vf(gt, vfid); + result = result ? -EUCLEAN : err; + } + + if (result) + return result; + + xe_sriov_info(xe, "VF%u stopped!\n", vfid); + return 0; +} + +/** + * xe_sriov_pf_control_reset_vf() - Perform a VF reset (FLR). + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_control_trigger_flr(gt, vfid); + result = result ? -EUCLEAN : err; + } + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_control_wait_flr(gt, vfid); + result = result ? -EUCLEAN : err; + } + + return result; +} + +/** + * xe_sriov_pf_control_wait_flr() - Wait for a VF reset (FLR) to complete. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_control_wait_flr(gt, vfid); + result = result ? -EUCLEAN : err; + } + + return result; +} + +/** + * xe_sriov_pf_control_sync_flr() - Synchronize a VF FLR between all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, false); + if (ret < 0) + return ret; + } + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, true); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * xe_sriov_pf_control_trigger_save_vf() - Start VF migration data SAVE sequence on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
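+ *
+ * A sketch of the intended ordering, assumed from the debugfs plumbing
+ * added later in this patch (packets are drained in between):
+ *
+ *   xe_sriov_pf_control_trigger_save_vf(xe, vfid);
+ *   ...   // consume packets via xe_sriov_pf_migration_read()
+ *   xe_sriov_pf_control_finish_save_vf(xe, vfid);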
+ */ +int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + ret = xe_sriov_packet_save_init(xe, vfid); + if (ret) + return ret; + + for_each_gt(gt, xe, id) { + xe_gt_sriov_pf_migration_save_init(gt, vfid); + + ret = xe_gt_sriov_pf_control_trigger_save_vf(gt, vfid); + if (ret) + return ret; + } + + return 0; +} + +/** + * xe_sriov_pf_control_finish_save_vf() - Complete VF migration data SAVE sequence on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_finish_save_vf(gt, vfid); + if (ret) + break; + } + + return ret; +} + +/** + * xe_sriov_pf_control_trigger_restore_vf() - Start VF migration data RESTORE sequence on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_trigger_restore_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_trigger_restore_vf(gt, vfid); + if (ret) + return ret; + } + + return ret; +} + +/** + * xe_sriov_pf_control_finish_restore_vf() - Complete VF migration data RESTORE sequence on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_finish_restore_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_finish_restore_vf(gt, vfid); + if (ret) + break; + } + + return ret; +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_sriov_pf_control.h new file mode 100644 index 000000000000..ef9f219b2109 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_CONTROL_H_ +#define _XE_SRIOV_PF_CONTROL_H_ + +struct xe_device; + +int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_trigger_restore_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_finish_restore_vf(struct xe_device *xe, unsigned int vfid); + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c new file mode 100644 index 000000000000..bad751217e1e --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include 
<linux/debugfs.h> +#include <drm/drm_debugfs.h> + +#include "xe_device.h" +#include "xe_device_types.h" +#include "xe_pm.h" +#include "xe_sriov_pf.h" +#include "xe_sriov_pf_control.h" +#include "xe_sriov_pf_debugfs.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration.h" +#include "xe_sriov_pf_provision.h" +#include "xe_sriov_pf_service.h" +#include "xe_sriov_printk.h" +#include "xe_tile_sriov_pf_debugfs.h" + +/* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov # d_inode->i_private = (xe_device*) + * │ ├── pf # d_inode->i_private = (xe_device*) + * │ ├── vf1 # d_inode->i_private = VFID(1) + * : : + * │ ├── vfN # d_inode->i_private = VFID(N) + */ + +static void *extract_priv(struct dentry *d) +{ + return d->d_inode->i_private; +} + +static struct xe_device *extract_xe(struct dentry *d) +{ + return extract_priv(d->d_parent); +} + +static unsigned int extract_vfid(struct dentry *d) +{ + void *p = extract_priv(d); + + return p == extract_xe(d) ? PFID : (uintptr_t)p; +} + +/* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * │ ├── restore_auto_provisioning + * │ : + * │ ├── pf/ + * │ ├── vf1 + * │ │ ├── ... + */ + +static ssize_t from_file_write_to_xe_call(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos, + int (*call)(struct xe_device *)) +{ + struct dentry *dent = file_dentry(file); + struct xe_device *xe = extract_xe(dent); + bool yes; + int ret; + + if (*ppos) + return -EINVAL; + ret = kstrtobool_from_user(userbuf, count, &yes); + if (ret < 0) + return ret; + if (yes) { + xe_pm_runtime_get(xe); + ret = call(xe); + xe_pm_runtime_put(xe); + } + if (ret < 0) + return ret; + return count; +} + +#define DEFINE_SRIOV_ATTRIBUTE(OP) \ +static int OP##_show(struct seq_file *s, void *unused) \ +{ \ + return 0; \ +} \ +static ssize_t OP##_write(struct file *file, const char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + return from_file_write_to_xe_call(file, userbuf, count, ppos, \ + xe_sriov_pf_##OP); \ +} \ +DEFINE_SHOW_STORE_ATTRIBUTE(OP) + +static inline int xe_sriov_pf_restore_auto_provisioning(struct xe_device *xe) +{ + return xe_sriov_pf_provision_set_mode(xe, XE_SRIOV_PROVISIONING_MODE_AUTO); +} + +DEFINE_SRIOV_ATTRIBUTE(restore_auto_provisioning); + +static int lockdown_vfs_enabling_open(struct inode *inode, struct file *file) +{ + struct dentry *dent = file_dentry(file); + struct xe_device *xe = extract_xe(dent); + ssize_t ret; + + ret = xe_sriov_pf_lockdown(xe); + if (ret < 0) + return ret; + + file->private_data = xe; + return nonseekable_open(inode, file); +} + +static int lockdown_vfs_enabling_release(struct inode *inode, struct file *file) +{ + struct xe_device *xe = file->private_data; + + xe_sriov_pf_end_lockdown(xe); + return 0; +} + +static const struct file_operations lockdown_vfs_enabling_fops = { + .owner = THIS_MODULE, + .open = lockdown_vfs_enabling_open, + .release = lockdown_vfs_enabling_release, +}; + +static void pf_populate_root(struct xe_device *xe, struct dentry *dent) +{ + debugfs_create_file("restore_auto_provisioning", 0200, dent, xe, + &restore_auto_provisioning_fops); + debugfs_create_file("lockdown_vfs_enabling", 0400, dent, xe, + &lockdown_vfs_enabling_fops); +} + +static int simple_show(struct seq_file *m, void *data) +{ + struct drm_printer p = drm_seq_file_printer(m); + struct drm_info_node *node = m->private; + struct dentry *parent = node->dent->d_parent; + struct xe_device *xe = parent->d_inode->i_private; + void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data; + + print(xe, &p); + 
return 0; +} + +static const struct drm_info_list debugfs_list[] = { + { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary }, + { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions }, +}; + +static void pf_populate_pf(struct xe_device *xe, struct dentry *pfdent) +{ + struct drm_minor *minor = xe->drm.primary; + + drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), pfdent, minor); +} + +/* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * │ ├── vf1 + * │ │ ├── migration_data + * │ │ ├── pause + * │ │ ├── reset + * │ │ ├── resume + * │ │ ├── stop + * │ │ ├── save + * │ │ ├── restore + * │ │ : + * │ ├── vf2 + * │ │ ├── ... + */ + +static int from_file_read_to_vf_call(struct seq_file *s, + int (*call)(struct xe_device *, unsigned int)) +{ + struct dentry *dent = file_dentry(s->file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + int ret; + + xe_pm_runtime_get(xe); + ret = call(xe, vfid); + xe_pm_runtime_put(xe); + + if (ret < 0) + return ret; + + return 0; +} + +static ssize_t from_file_write_to_vf_call(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos, + int (*call)(struct xe_device *, unsigned int)) +{ + struct dentry *dent = file_dentry(file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + bool yes; + int ret; + + if (*ppos) + return -EINVAL; + ret = kstrtobool_from_user(userbuf, count, &yes); + if (ret < 0) + return ret; + if (yes) { + xe_pm_runtime_get(xe); + ret = call(xe, vfid); + xe_pm_runtime_put(xe); + } + if (ret < 0) + return ret; + return count; +} + +#define DEFINE_VF_CONTROL_ATTRIBUTE(OP) \ +static int OP##_show(struct seq_file *s, void *unused) \ +{ \ + return 0; \ +} \ +static ssize_t OP##_write(struct file *file, const char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + return from_file_write_to_vf_call(file, userbuf, count, ppos, \ + xe_sriov_pf_control_##OP); \ +} \ +DEFINE_SHOW_STORE_ATTRIBUTE(OP) + +#define DEFINE_VF_CONTROL_ATTRIBUTE_RW(OP) \ +static int OP##_show(struct seq_file *s, void *unused) \ +{ \ + return from_file_read_to_vf_call(s, \ + xe_sriov_pf_control_finish_##OP); \ +} \ +static ssize_t OP##_write(struct file *file, const char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + return from_file_write_to_vf_call(file, userbuf, count, ppos, \ + xe_sriov_pf_control_trigger_##OP); \ +} \ +DEFINE_SHOW_STORE_ATTRIBUTE(OP) + +DEFINE_VF_CONTROL_ATTRIBUTE(pause_vf); +DEFINE_VF_CONTROL_ATTRIBUTE(resume_vf); +DEFINE_VF_CONTROL_ATTRIBUTE(stop_vf); +DEFINE_VF_CONTROL_ATTRIBUTE(reset_vf); +DEFINE_VF_CONTROL_ATTRIBUTE_RW(save_vf); +DEFINE_VF_CONTROL_ATTRIBUTE_RW(restore_vf); + +static ssize_t data_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + + if (*pos) + return -ESPIPE; + + return xe_sriov_pf_migration_write(xe, vfid, buf, count); +} + +static ssize_t data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct dentry *dent = file_dentry(file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + + if (*ppos) + return -ESPIPE; + + return xe_sriov_pf_migration_read(xe, vfid, buf, count); +} + +static const struct file_operations data_vf_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read 
= data_read, + .llseek = default_llseek, +}; + +static ssize_t size_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) +{ + struct dentry *dent = file_dentry(file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + char buf[21]; + ssize_t ret; + int len; + + xe_pm_runtime_get(xe); + ret = xe_sriov_pf_migration_size(xe, vfid); + xe_pm_runtime_put(xe); + if (ret < 0) + return ret; + + len = scnprintf(buf, sizeof(buf), "%zd\n", ret); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations size_vf_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = size_read, + .llseek = default_llseek, +}; + +static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) +{ + debugfs_create_file("pause", 0200, vfdent, xe, &pause_vf_fops); + debugfs_create_file("resume", 0200, vfdent, xe, &resume_vf_fops); + debugfs_create_file("stop", 0200, vfdent, xe, &stop_vf_fops); + debugfs_create_file("reset", 0200, vfdent, xe, &reset_vf_fops); + debugfs_create_file("save", 0600, vfdent, xe, &save_vf_fops); + debugfs_create_file("restore", 0600, vfdent, xe, &restore_vf_fops); + debugfs_create_file("migration_data", 0600, vfdent, xe, &data_vf_fops); + debugfs_create_file("migration_size", 0400, vfdent, xe, &size_vf_fops); +} + +static void pf_populate_with_tiles(struct xe_device *xe, struct dentry *dent, unsigned int vfid) +{ + struct xe_tile *tile; + unsigned int id; + + for_each_tile(tile, xe, id) + xe_tile_sriov_pf_debugfs_populate(tile, dent, vfid); +} + +/** + * xe_sriov_pf_debugfs_register - Register PF debugfs attributes. + * @xe: the &xe_device + * @root: the root &dentry + * + * Create separate directory that will contain all SR-IOV related files, + * organized per each SR-IOV function (PF, VF1, VF2, ..., VFn). + */ +void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root) +{ + int totalvfs = xe_sriov_pf_get_totalvfs(xe); + struct dentry *pfdent; + struct dentry *vfdent; + struct dentry *dent; + char vfname[16]; /* should be more than enough for "vf%u\0" and VFID(UINT_MAX) */ + unsigned int n; + + /* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov # d_inode->i_private = (xe_device*) + * │ ├── ... + */ + dent = debugfs_create_dir("sriov", root); + if (IS_ERR(dent)) + return; + dent->d_inode->i_private = xe; + + pf_populate_root(xe, dent); + + /* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov # d_inode->i_private = (xe_device*) + * │ ├── pf # d_inode->i_private = (xe_device*) + * │ │ ├── ... + */ + pfdent = debugfs_create_dir("pf", dent); + if (IS_ERR(pfdent)) + return; + pfdent->d_inode->i_private = xe; + + pf_populate_pf(xe, pfdent); + pf_populate_with_tiles(xe, pfdent, PFID); + + /* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov # d_inode->i_private = (xe_device*) + * │ ├── vf1 # d_inode->i_private = VFID(1) + * │ ├── vf2 # d_inode->i_private = VFID(2) + * │ ├── ... 
+ */ + for (n = 1; n <= totalvfs; n++) { + snprintf(vfname, sizeof(vfname), "vf%u", VFID(n)); + vfdent = debugfs_create_dir(vfname, dent); + if (IS_ERR(vfdent)) + return; + vfdent->d_inode->i_private = (void *)(uintptr_t)VFID(n); + + pf_populate_vf(xe, vfdent); + pf_populate_with_tiles(xe, vfdent, VFID(n)); + } +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h new file mode 100644 index 000000000000..93db13585b82 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_DEBUGFS_H_ +#define _XE_SRIOV_PF_DEBUGFS_H_ + +struct dentry; +struct xe_device; + +#ifdef CONFIG_PCI_IOV +void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root); +#else +static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root) { } +#endif + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h index dd1df950b021..9054fdc34597 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h @@ -37,10 +37,37 @@ static inline int xe_sriov_pf_get_totalvfs(struct xe_device *xe) return xe->sriov.pf.driver_max_vfs; } +/** + * xe_sriov_pf_num_vfs() - Number of enabled VFs on the PF. + * @xe: the PF &xe_device + * + * Return: Number of enabled VFs on the PF. + */ +static inline unsigned int xe_sriov_pf_num_vfs(const struct xe_device *xe) +{ + return pci_num_vf(to_pci_dev(xe->drm.dev)); +} + +/** + * xe_sriov_pf_admin_only() - Check if PF is mainly used for VFs administration. + * @xe: the PF &xe_device + * + * Return: True if PF is mainly used for VFs administration. + */ +static inline bool xe_sriov_pf_admin_only(const struct xe_device *xe) +{ + return !xe->info.probe_display; +} + static inline struct mutex *xe_sriov_pf_master_mutex(struct xe_device *xe) { xe_assert(xe, IS_SRIOV_PF(xe)); return &xe->sriov.pf.master_lock; } +int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard, + bool write, void *who); +void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard, + bool write, void *who); + #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c new file mode 100644 index 000000000000..de06cc690fc8 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c @@ -0,0 +1,340 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <drm/drm_managed.h> + +#include "xe_device.h" +#include "xe_gt_sriov_pf_control.h" +#include "xe_gt_sriov_pf_migration.h" +#include "xe_pm.h" +#include "xe_sriov.h" +#include "xe_sriov_packet.h" +#include "xe_sriov_packet_types.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration.h" +#include "xe_sriov_printk.h" + +static struct xe_sriov_migration_state *pf_pick_migration(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + + return &xe->sriov.pf.vfs[vfid].migration; +} + +/** + * xe_sriov_pf_migration_waitqueue() - Get waitqueue for migration. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * Return: pointer to the migration waitqueue. 
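+ *
+ * The save path sleeps on it until migration data becomes available, for
+ * example (the condition helper name is illustrative only):
+ *
+ *   wait_event_interruptible(*xe_sriov_pf_migration_waitqueue(xe, vfid),
+ *                            vf_mig_data_ready(xe, vfid));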
+ */ +wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid) +{ + return &pf_pick_migration(xe, vfid)->wq; +} + +/** + * xe_sriov_pf_migration_supported() - Check if SR-IOV VF migration is supported by the device + * @xe: the &xe_device + * + * Return: true if migration is supported, false otherwise + */ +bool xe_sriov_pf_migration_supported(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + + return xe->sriov.pf.migration.supported; +} + +static bool pf_check_migration_support(struct xe_device *xe) +{ + /* XXX: for now this is for feature enabling only */ + return IS_ENABLED(CONFIG_DRM_XE_DEBUG); +} + +static void pf_migration_cleanup(void *arg) +{ + struct xe_sriov_migration_state *migration = arg; + + xe_sriov_packet_free(migration->pending); + xe_sriov_packet_free(migration->trailer); + xe_sriov_packet_free(migration->descriptor); +} + +/** + * xe_sriov_pf_migration_init() - Initialize support for SR-IOV VF migration. + * @xe: the &xe_device + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_migration_init(struct xe_device *xe) +{ + unsigned int n, totalvfs; + int err; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + xe->sriov.pf.migration.supported = pf_check_migration_support(xe); + if (!xe_sriov_pf_migration_supported(xe)) + return 0; + + totalvfs = xe_sriov_pf_get_totalvfs(xe); + for (n = 1; n <= totalvfs; n++) { + struct xe_sriov_migration_state *migration = pf_pick_migration(xe, n); + + err = drmm_mutex_init(&xe->drm, &migration->lock); + if (err) + return err; + + init_waitqueue_head(&migration->wq); + + err = devm_add_action_or_reset(xe->drm.dev, pf_migration_cleanup, migration); + if (err) + return err; + } + + return 0; +} + +static bool pf_migration_data_ready(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + u8 gt_id; + + for_each_gt(gt, xe, gt_id) { + if (xe_gt_sriov_pf_control_check_save_failed(gt, vfid) || + xe_gt_sriov_pf_control_check_save_data_done(gt, vfid) || + !xe_gt_sriov_pf_migration_ring_empty(gt, vfid)) + return true; + } + + return false; +} + +static struct xe_sriov_packet * +pf_migration_consume(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_packet *data; + bool more_data = false; + struct xe_gt *gt; + u8 gt_id; + + for_each_gt(gt, xe, gt_id) { + data = xe_gt_sriov_pf_migration_save_consume(gt, vfid); + if (data && PTR_ERR(data) != -EAGAIN) + return data; + if (PTR_ERR(data) == -EAGAIN) + more_data = true; + } + + if (!more_data) + return NULL; + + return ERR_PTR(-EAGAIN); +} + +/** + * xe_sriov_pf_migration_save_consume() - Consume a VF migration data packet from the device. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * Called by the save migration data consumer (userspace) when + * processing migration data. + * If there is no migration data to process, wait until more data is available. + * + * Return: Pointer to &xe_sriov_packet on success, + * NULL if ring is empty and no more migration data is expected, + * ERR_PTR value in case of error. 
+ */ +struct xe_sriov_packet * +xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid); + struct xe_sriov_packet *data; + int ret; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + for (;;) { + data = pf_migration_consume(xe, vfid); + if (PTR_ERR(data) != -EAGAIN) + break; + + ret = wait_event_interruptible(migration->wq, + pf_migration_data_ready(xe, vfid)); + if (ret) + return ERR_PTR(ret); + } + + return data; +} + +static int pf_handle_descriptor(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_packet *data) +{ + int ret; + + if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0) + return -EINVAL; + + ret = xe_sriov_packet_process_descriptor(xe, vfid, data); + if (ret) + return ret; + + xe_sriov_packet_free(data); + + return 0; +} + +static int pf_handle_trailer(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_packet *data) +{ + struct xe_gt *gt; + u8 gt_id; + + if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0) + return -EINVAL; + if (data->hdr.offset != 0 || data->hdr.size != 0 || data->buff || data->bo) + return -EINVAL; + + xe_sriov_packet_free(data); + + for_each_gt(gt, xe, gt_id) + xe_gt_sriov_pf_control_restore_data_done(gt, vfid); + + return 0; +} + +/** + * xe_sriov_pf_migration_restore_produce() - Produce a VF migration data packet to the device. + * @xe: the &xe_device + * @vfid: the VF identifier + * @data: Pointer to &xe_sriov_packet + * + * Called by the restore migration data producer (userspace) when processing + * migration data. + * If the underlying data structure is full, wait until there is space. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_packet *data) +{ + struct xe_gt *gt; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + if (data->hdr.type == XE_SRIOV_PACKET_TYPE_DESCRIPTOR) + return pf_handle_descriptor(xe, vfid, data); + if (data->hdr.type == XE_SRIOV_PACKET_TYPE_TRAILER) + return pf_handle_trailer(xe, vfid, data); + + gt = xe_device_get_gt(xe, data->hdr.gt_id); + if (!gt || data->hdr.tile_id != gt->tile->id || data->hdr.type == 0) { + xe_sriov_err_ratelimited(xe, "Received invalid restore packet for VF%u (type:%u, tile:%u, GT:%u)\n", + vfid, data->hdr.type, data->hdr.tile_id, data->hdr.gt_id); + return -EINVAL; + } + + return xe_gt_sriov_pf_migration_restore_produce(gt, vfid, data); +} + +/** + * xe_sriov_pf_migration_read() - Read migration data from the device. + * @xe: the &xe_device + * @vfid: the VF identifier + * @buf: start address of userspace buffer + * @len: requested read size from userspace + * + * Return: number of bytes that have been successfully read, + * 0 if no more migration data is available, + * -errno on failure. + */ +ssize_t xe_sriov_pf_migration_read(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len) +{ + struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid); + ssize_t ret, consumed = 0; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + scoped_cond_guard(mutex_intr, return -EINTR, &migration->lock) { + while (consumed < len) { + ret = xe_sriov_packet_read_single(xe, vfid, buf, len - consumed); + if (ret == -ENODATA) + break; + if (ret < 0) + return ret; + + consumed += ret; + buf += ret; + } + } + + return consumed; +} + +/** + * xe_sriov_pf_migration_write() - Write migration data to the device. 
+ * @xe: the &xe_device + * @vfid: the VF identifier + * @buf: start address of userspace buffer + * @len: requested write size from userspace + * + * Return: number of bytes that have been successfully written, + * -errno on failure. + */ +ssize_t xe_sriov_pf_migration_write(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len) +{ + struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid); + ssize_t ret, produced = 0; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + scoped_cond_guard(mutex_intr, return -EINTR, &migration->lock) { + while (produced < len) { + ret = xe_sriov_packet_write_single(xe, vfid, buf, len - produced); + if (ret < 0) + return ret; + + produced += ret; + buf += ret; + } + } + + return produced; +} + +/** + * xe_sriov_pf_migration_size() - Total size of migration data from all components within a device + * @xe: the &xe_device + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: total migration data size in bytes or a negative error code on failure. + */ +ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid) +{ + size_t size = 0; + struct xe_gt *gt; + ssize_t ret; + u8 gt_id; + + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid); + + for_each_gt(gt, xe, gt_id) { + ret = xe_gt_sriov_pf_migration_size(gt, vfid); + if (ret < 0) + return ret; + + size += ret; + } + + return size; +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h new file mode 100644 index 000000000000..b806298a0bb6 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_MIGRATION_H_ +#define _XE_SRIOV_PF_MIGRATION_H_ + +#include <linux/types.h> +#include <linux/wait.h> + +struct xe_device; +struct xe_sriov_packet; + +int xe_sriov_pf_migration_init(struct xe_device *xe); +bool xe_sriov_pf_migration_supported(struct xe_device *xe); +int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_packet *data); +struct xe_sriov_packet * +xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid); +ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid); +wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid); + +ssize_t xe_sriov_pf_migration_read(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len); +ssize_t xe_sriov_pf_migration_write(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len); + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h new file mode 100644 index 000000000000..363d673ee1dd --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_MIGRATION_TYPES_H_ +#define _XE_SRIOV_PF_MIGRATION_TYPES_H_ + +#include <linux/types.h> +#include <linux/mutex_types.h> +#include <linux/wait.h> + +/** + * struct xe_sriov_pf_migration - Xe device level VF migration data + */ +struct xe_sriov_pf_migration { + /** @supported: indicates whether VF migration feature is supported */ + bool supported; +}; + +/** + * struct xe_sriov_migration_state - Per VF device-level migration related data + */ +struct xe_sriov_migration_state { + /** @wq: waitqueue used to avoid busy-waiting for 
snapshot production/consumption */ + wait_queue_head_t wq; + /** @lock: Mutex protecting the migration data */ + struct mutex lock; + /** @pending: currently processed data packet of VF resource */ + struct xe_sriov_packet *pending; + /** @trailer: data packet used to indicate the end of stream */ + struct xe_sriov_packet *trailer; + /** @descriptor: data packet containing the metadata describing the device */ + struct xe_sriov_packet *descriptor; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision.c b/drivers/gpu/drm/xe/xe_sriov_pf_provision.c new file mode 100644 index 000000000000..01470c42e8a7 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include "xe_assert.h" +#include "xe_device.h" +#include "xe_gt_sriov_pf_config.h" +#include "xe_gt_sriov_pf_policy.h" +#include "xe_sriov.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_provision.h" +#include "xe_sriov_pf_provision_types.h" +#include "xe_sriov_printk.h" + +static const char *mode_to_string(enum xe_sriov_provisioning_mode mode) +{ + switch (mode) { + case XE_SRIOV_PROVISIONING_MODE_AUTO: + return "auto"; + case XE_SRIOV_PROVISIONING_MODE_CUSTOM: + return "custom"; + default: + return "<invalid>"; + } +} + +static bool pf_auto_provisioning_mode(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + + return xe->sriov.pf.provision.mode == XE_SRIOV_PROVISIONING_MODE_AUTO; +} + +static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs) +{ + unsigned int n; + + for (n = 1; n <= num_vfs; n++) + if (!xe_gt_sriov_pf_config_is_empty(gt, n)) + return false; + + return true; +} + +static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + if (!pf_needs_provisioning(gt, num_vfs)) + return -EUCLEAN; + err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs); + result = result ?: err; + } + + return result; +} + +static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs) +{ + struct xe_gt *gt; + unsigned int id; + unsigned int n; + + for_each_gt(gt, xe, id) + for (n = 1; n <= num_vfs; n++) + xe_gt_sriov_pf_config_release(gt, n, true); +} + +static void pf_unprovision_all_vfs(struct xe_device *xe) +{ + pf_unprovision_vfs(xe, xe_sriov_pf_get_totalvfs(xe)); +} + +/** + * xe_sriov_pf_provision_vfs() - Provision VFs in auto-mode. + * @xe: the PF &xe_device + * @num_vfs: the number of VFs to auto-provision + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + + if (!pf_auto_provisioning_mode(xe)) + return 0; + + return pf_provision_vfs(xe, num_vfs); +} + +/** + * xe_sriov_pf_unprovision_vfs() - Unprovision VFs in auto-mode. + * @xe: the PF &xe_device + * @num_vfs: the number of VFs to unprovision + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + + if (!pf_auto_provisioning_mode(xe)) + return 0; + + pf_unprovision_vfs(xe, num_vfs); + return 0; +} + +/** + * xe_sriov_pf_provision_set_mode() - Change VFs provision mode. 
+ * @xe: the PF &xe_device + * @mode: the new VFs provisioning mode + * + * When changing from AUTO to CUSTOM mode, any already allocated VFs resources + * will remain allocated and will not be released upon VFs disabling. + * + * When changing back to AUTO mode, if VFs are not enabled, already allocated + * VFs resources will be immediately released. If VFs are still enabled, such + * mode change is rejected. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + + if (mode == xe->sriov.pf.provision.mode) + return 0; + + if (mode == XE_SRIOV_PROVISIONING_MODE_AUTO) { + if (xe_sriov_pf_num_vfs(xe)) { + xe_sriov_dbg(xe, "can't restore %s: VFs must be disabled!\n", + mode_to_string(mode)); + return -EBUSY; + } + pf_unprovision_all_vfs(xe); + } + + xe_sriov_dbg(xe, "mode %s changed to %s by %ps\n", + mode_to_string(xe->sriov.pf.provision.mode), + mode_to_string(mode), __builtin_return_address(0)); + xe->sriov.pf.provision.mode = mode; + return 0; +} + +/** + * xe_sriov_pf_provision_bulk_apply_eq() - Change execution quantum for all VFs and PF. + * @xe: the PF &xe_device + * @eq: execution quantum in [ms] to set + * + * Change execution quantum (EQ) provisioning on all tiles/GTs. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_bulk_apply_eq(struct xe_device *xe, u32 eq) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + guard(mutex)(xe_sriov_pf_master_mutex(xe)); + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(gt, eq); + result = result ?: err; + } + + return result; +} + +/** + * xe_sriov_pf_provision_apply_vf_eq() - Change VF's execution quantum. + * @xe: the PF &xe_device + * @vfid: the VF identifier + * @eq: execution quantum in [ms] to set + * + * Change VF's execution quantum (EQ) provisioning on all tiles/GTs. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_apply_vf_eq(struct xe_device *xe, unsigned int vfid, u32 eq) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + guard(mutex)(xe_sriov_pf_master_mutex(xe)); + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, eq); + result = result ?: err; + } + + return result; +} + +static int pf_report_unclean(struct xe_gt *gt, unsigned int vfid, + const char *what, u32 found, u32 expected) +{ + char name[8]; + + xe_sriov_dbg(gt_to_xe(gt), "%s on GT%u has %s=%u (expected %u)\n", + xe_sriov_function_name(vfid, name, sizeof(name)), + gt->info.id, what, found, expected); + return -EUCLEAN; +} + +/** + * xe_sriov_pf_provision_query_vf_eq() - Query VF's execution quantum. + * @xe: the PF &xe_device + * @vfid: the VF identifier + * @eq: placeholder for the returned execution quantum in [ms] + * + * Query VF's execution quantum (EQ) provisioning from all tiles/GTs. + * If values across tiles/GTs are inconsistent then -EUCLEAN error will be returned. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_sriov_pf_provision_query_vf_eq(struct xe_device *xe, unsigned int vfid, u32 *eq) +{ + struct xe_gt *gt; + unsigned int id; + int count = 0; + u32 value; + + guard(mutex)(xe_sriov_pf_master_mutex(xe)); + + for_each_gt(gt, xe, id) { + value = xe_gt_sriov_pf_config_get_exec_quantum_locked(gt, vfid); + if (!count++) + *eq = value; + else if (value != *eq) + return pf_report_unclean(gt, vfid, "EQ", value, *eq); + } + + return !count ? -ENODATA : 0; +} + +/** + * xe_sriov_pf_provision_bulk_apply_pt() - Change preemption timeout for all VFs and PF. + * @xe: the PF &xe_device + * @pt: preemption timeout in [us] to set + * + * Change preemption timeout (PT) provisioning on all tiles/GTs. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_bulk_apply_pt(struct xe_device *xe, u32 pt) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + guard(mutex)(xe_sriov_pf_master_mutex(xe)); + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(gt, pt); + result = result ?: err; + } + + return result; +} + +/** + * xe_sriov_pf_provision_apply_vf_pt() - Change VF's preemption timeout. + * @xe: the PF &xe_device + * @vfid: the VF identifier + * @pt: preemption timeout in [us] to set + * + * Change VF's preemption timeout (PT) provisioning on all tiles/GTs. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_apply_vf_pt(struct xe_device *xe, unsigned int vfid, u32 pt) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + guard(mutex)(xe_sriov_pf_master_mutex(xe)); + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, pt); + result = result ?: err; + } + + return result; +} + +/** + * xe_sriov_pf_provision_query_vf_pt() - Query VF's preemption timeout. + * @xe: the PF &xe_device + * @vfid: the VF identifier + * @pt: placeholder for the returned preemption timeout in [us] + * + * Query VF's preemption timeout (PT) provisioning from all tiles/GTs. + * If values across tiles/GTs are inconsistent then -EUCLEAN error will be returned. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_query_vf_pt(struct xe_device *xe, unsigned int vfid, u32 *pt) +{ + struct xe_gt *gt; + unsigned int id; + int count = 0; + u32 value; + + guard(mutex)(xe_sriov_pf_master_mutex(xe)); + + for_each_gt(gt, xe, id) { + value = xe_gt_sriov_pf_config_get_preempt_timeout_locked(gt, vfid); + if (!count++) + *pt = value; + else if (value != *pt) + return pf_report_unclean(gt, vfid, "PT", value, *pt); + } + + return !count ? -ENODATA : 0; +} + +/** + * xe_sriov_pf_provision_bulk_apply_priority() - Change scheduling priority of all VFs and PF. + * @xe: the PF &xe_device + * @prio: scheduling priority to set + * + * Change the scheduling priority provisioning on all tiles/GTs. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_bulk_apply_priority(struct xe_device *xe, u32 prio) +{ + bool sched_if_idle; + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + /* + * Currently, priority changes that involves VFs are only allowed using + * the 'sched_if_idle' policy KLV, so only LOW and NORMAL are supported. 
+ */ + xe_assert(xe, prio < GUC_SCHED_PRIORITY_HIGH); + sched_if_idle = prio == GUC_SCHED_PRIORITY_NORMAL; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_policy_set_sched_if_idle(gt, sched_if_idle); + result = result ?: err; + } + + return result; +} + +/** + * xe_sriov_pf_provision_apply_vf_priority() - Change VF's scheduling priority. + * @xe: the PF &xe_device + * @vfid: the VF identifier + * @prio: scheduling priority to set + * + * Change VF's scheduling priority provisioning on all tiles/GTs. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_apply_vf_priority(struct xe_device *xe, unsigned int vfid, u32 prio) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_config_set_sched_priority(gt, vfid, prio); + result = result ?: err; + } + + return result; +} + +/** + * xe_sriov_pf_provision_query_vf_priority() - Query VF's scheduling priority. + * @xe: the PF &xe_device + * @vfid: the VF identifier + * @prio: placeholder for the returned scheduling priority + * + * Query VF's scheduling priority provisioning from all tiles/GTs. + * If values across tiles/GTs are inconsistent then -EUCLEAN error will be returned. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_provision_query_vf_priority(struct xe_device *xe, unsigned int vfid, u32 *prio) +{ + struct xe_gt *gt; + unsigned int id; + int count = 0; + u32 value; + + for_each_gt(gt, xe, id) { + value = xe_gt_sriov_pf_config_get_sched_priority(gt, vfid); + if (!count++) + *prio = value; + else if (value != *prio) + return pf_report_unclean(gt, vfid, "priority", value, *prio); + } + + return !count ? -ENODATA : 0; +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision.h b/drivers/gpu/drm/xe/xe_sriov_pf_provision.h new file mode 100644 index 000000000000..bccf23d51396 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_PROVISION_H_ +#define _XE_SRIOV_PF_PROVISION_H_ + +#include <linux/types.h> + +#include "xe_sriov_pf_provision_types.h" + +struct xe_device; + +int xe_sriov_pf_provision_bulk_apply_eq(struct xe_device *xe, u32 eq); +int xe_sriov_pf_provision_apply_vf_eq(struct xe_device *xe, unsigned int vfid, u32 eq); +int xe_sriov_pf_provision_query_vf_eq(struct xe_device *xe, unsigned int vfid, u32 *eq); + +int xe_sriov_pf_provision_bulk_apply_pt(struct xe_device *xe, u32 pt); +int xe_sriov_pf_provision_apply_vf_pt(struct xe_device *xe, unsigned int vfid, u32 pt); +int xe_sriov_pf_provision_query_vf_pt(struct xe_device *xe, unsigned int vfid, u32 *pt); + +int xe_sriov_pf_provision_bulk_apply_priority(struct xe_device *xe, u32 prio); +int xe_sriov_pf_provision_apply_vf_priority(struct xe_device *xe, unsigned int vfid, u32 prio); +int xe_sriov_pf_provision_query_vf_priority(struct xe_device *xe, unsigned int vfid, u32 *prio); + +int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs); +int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs); + +int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode); + +/** + * xe_sriov_pf_provision_set_custom_mode() - Change VFs provision mode to custom. + * @xe: the PF &xe_device + * + * This function can only be called on PF. 
+ * + * Return: 0 on success or a negative error code on failure. + */ +static inline int xe_sriov_pf_provision_set_custom_mode(struct xe_device *xe) +{ + return xe_sriov_pf_provision_set_mode(xe, XE_SRIOV_PROVISIONING_MODE_CUSTOM); +} + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h new file mode 100644 index 000000000000..a847b8a4c4da --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_PROVISION_TYPES_H_ +#define _XE_SRIOV_PF_PROVISION_TYPES_H_ + +#include <linux/build_bug.h> + +/** + * enum xe_sriov_provisioning_mode - SR-IOV provisioning mode. + * + * @XE_SRIOV_PROVISIONING_MODE_AUTO: VFs are provisioned during VFs enabling. + * Any resources allocated to the VFs will be + * automatically released when disabling VFs. + * This is the default mode. + * @XE_SRIOV_PROVISIONING_MODE_CUSTOM: Explicit VFs provisioning using uABI interfaces. + * VF resources remain allocated regardless of whether + * VFs are enabled or not. + */ +enum xe_sriov_provisioning_mode { + XE_SRIOV_PROVISIONING_MODE_AUTO, + XE_SRIOV_PROVISIONING_MODE_CUSTOM, +}; +static_assert(XE_SRIOV_PROVISIONING_MODE_AUTO == 0); + +/** + * struct xe_sriov_pf_provision - Data used by the PF provisioning. + */ +struct xe_sriov_pf_provision { + /** @mode: selected provisioning mode. */ + enum xe_sriov_provisioning_mode mode; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c new file mode 100644 index 000000000000..c0b767ac735c --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c @@ -0,0 +1,647 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <linux/kobject.h> +#include <linux/sysfs.h> + +#include <drm/drm_managed.h> + +#include "xe_assert.h" +#include "xe_pci_sriov.h" +#include "xe_pm.h" +#include "xe_sriov.h" +#include "xe_sriov_pf.h" +#include "xe_sriov_pf_control.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_provision.h" +#include "xe_sriov_pf_sysfs.h" +#include "xe_sriov_printk.h" + +static int emit_choice(char *buf, int choice, const char * const *array, size_t size) +{ + int pos = 0; + int n; + + for (n = 0; n < size; n++) { + pos += sysfs_emit_at(buf, pos, "%s%s%s%s", + n ? " " : "", + n == choice ? "[" : "", + array[n], + n == choice ? "]" : ""); + } + pos += sysfs_emit_at(buf, pos, "\n"); + + return pos; +} + +/* + * /sys/bus/pci/drivers/xe/BDF/ + * : + * ├── sriov_admin/ + * ├── ... + * ├── .bulk_profile + * │ ├── exec_quantum_ms + * │ ├── preempt_timeout_us + * │ └── sched_priority + * ├── pf/ + * │ ├── ... + * │ ├── device -> ../../../BDF + * │ └── profile + * │ ├── exec_quantum_ms + * │ ├── preempt_timeout_us + * │ └── sched_priority + * ├── vf1/ + * │ ├── ... 
+ * │ ├── device -> ../../../BDF.1 + * │ ├── stop + * │ └── profile + * │ ├── exec_quantum_ms + * │ ├── preempt_timeout_us + * │ └── sched_priority + * ├── vf2/ + * : + * └── vfN/ + */ + +struct xe_sriov_kobj { + struct kobject base; + struct xe_device *xe; + unsigned int vfid; +}; +#define to_xe_sriov_kobj(p) container_of_const((p), struct xe_sriov_kobj, base) + +struct xe_sriov_dev_attr { + struct attribute attr; + ssize_t (*show)(struct xe_device *xe, char *buf); + ssize_t (*store)(struct xe_device *xe, const char *buf, size_t count); +}; +#define to_xe_sriov_dev_attr(p) container_of_const((p), struct xe_sriov_dev_attr, attr) + +#define XE_SRIOV_DEV_ATTR(NAME) \ +struct xe_sriov_dev_attr xe_sriov_dev_attr_##NAME = \ + __ATTR(NAME, 0644, xe_sriov_dev_attr_##NAME##_show, xe_sriov_dev_attr_##NAME##_store) + +#define XE_SRIOV_DEV_ATTR_RO(NAME) \ +struct xe_sriov_dev_attr xe_sriov_dev_attr_##NAME = \ + __ATTR(NAME, 0444, xe_sriov_dev_attr_##NAME##_show, NULL) + +#define XE_SRIOV_DEV_ATTR_WO(NAME) \ +struct xe_sriov_dev_attr xe_sriov_dev_attr_##NAME = \ + __ATTR(NAME, 0200, NULL, xe_sriov_dev_attr_##NAME##_store) + +struct xe_sriov_vf_attr { + struct attribute attr; + ssize_t (*show)(struct xe_device *xe, unsigned int vfid, char *buf); + ssize_t (*store)(struct xe_device *xe, unsigned int vfid, const char *buf, size_t count); +}; +#define to_xe_sriov_vf_attr(p) container_of_const((p), struct xe_sriov_vf_attr, attr) + +#define XE_SRIOV_VF_ATTR(NAME) \ +struct xe_sriov_vf_attr xe_sriov_vf_attr_##NAME = \ + __ATTR(NAME, 0644, xe_sriov_vf_attr_##NAME##_show, xe_sriov_vf_attr_##NAME##_store) + +#define XE_SRIOV_VF_ATTR_RO(NAME) \ +struct xe_sriov_vf_attr xe_sriov_vf_attr_##NAME = \ + __ATTR(NAME, 0444, xe_sriov_vf_attr_##NAME##_show, NULL) + +#define XE_SRIOV_VF_ATTR_WO(NAME) \ +struct xe_sriov_vf_attr xe_sriov_vf_attr_##NAME = \ + __ATTR(NAME, 0200, NULL, xe_sriov_vf_attr_##NAME##_store) + +/* device level attributes go here */ + +#define DEFINE_SIMPLE_BULK_PROVISIONING_SRIOV_DEV_ATTR_WO(NAME, ITEM, TYPE) \ + \ +static ssize_t xe_sriov_dev_attr_##NAME##_store(struct xe_device *xe, \ + const char *buf, size_t count) \ +{ \ + TYPE value; \ + int err; \ + \ + err = kstrto##TYPE(buf, 0, &value); \ + if (err) \ + return err; \ + \ + err = xe_sriov_pf_provision_bulk_apply_##ITEM(xe, value); \ + return err ?: count; \ +} \ + \ +static XE_SRIOV_DEV_ATTR_WO(NAME) + +DEFINE_SIMPLE_BULK_PROVISIONING_SRIOV_DEV_ATTR_WO(exec_quantum_ms, eq, u32); +DEFINE_SIMPLE_BULK_PROVISIONING_SRIOV_DEV_ATTR_WO(preempt_timeout_us, pt, u32); + +static const char * const sched_priority_names[] = { + [GUC_SCHED_PRIORITY_LOW] = "low", + [GUC_SCHED_PRIORITY_NORMAL] = "normal", + [GUC_SCHED_PRIORITY_HIGH] = "high", +}; + +static bool sched_priority_change_allowed(unsigned int vfid) +{ + /* As of today GuC FW allows to selectively change only the PF priority. */ + return vfid == PFID; +} + +static bool sched_priority_high_allowed(unsigned int vfid) +{ + /* As of today GuC FW allows to select 'high' priority only for the PF. 
*/ + return vfid == PFID; +} + +static bool sched_priority_bulk_high_allowed(struct xe_device *xe) +{ + /* all VFs are equal - it's sufficient to check VF1 only */ + return sched_priority_high_allowed(VFID(1)); +} + +static ssize_t xe_sriov_dev_attr_sched_priority_store(struct xe_device *xe, + const char *buf, size_t count) +{ + size_t num_priorities = ARRAY_SIZE(sched_priority_names); + int match; + int err; + + if (!sched_priority_bulk_high_allowed(xe)) + num_priorities--; + + match = __sysfs_match_string(sched_priority_names, num_priorities, buf); + if (match < 0) + return -EINVAL; + + err = xe_sriov_pf_provision_bulk_apply_priority(xe, match); + return err ?: count; +} + +static XE_SRIOV_DEV_ATTR_WO(sched_priority); + +static struct attribute *bulk_profile_dev_attrs[] = { + &xe_sriov_dev_attr_exec_quantum_ms.attr, + &xe_sriov_dev_attr_preempt_timeout_us.attr, + &xe_sriov_dev_attr_sched_priority.attr, + NULL +}; + +static const struct attribute_group bulk_profile_dev_attr_group = { + .name = ".bulk_profile", + .attrs = bulk_profile_dev_attrs, +}; + +static const struct attribute_group *xe_sriov_dev_attr_groups[] = { + &bulk_profile_dev_attr_group, + NULL +}; + +/* and VF-level attributes go here */ + +#define DEFINE_SIMPLE_PROVISIONING_SRIOV_VF_ATTR(NAME, ITEM, TYPE, FORMAT) \ +static ssize_t xe_sriov_vf_attr_##NAME##_show(struct xe_device *xe, unsigned int vfid, \ + char *buf) \ +{ \ + TYPE value = 0; \ + int err; \ + \ + err = xe_sriov_pf_provision_query_vf_##ITEM(xe, vfid, &value); \ + if (err) \ + return err; \ + \ + return sysfs_emit(buf, FORMAT, value); \ +} \ + \ +static ssize_t xe_sriov_vf_attr_##NAME##_store(struct xe_device *xe, unsigned int vfid, \ + const char *buf, size_t count) \ +{ \ + TYPE value; \ + int err; \ + \ + err = kstrto##TYPE(buf, 0, &value); \ + if (err) \ + return err; \ + \ + err = xe_sriov_pf_provision_apply_vf_##ITEM(xe, vfid, value); \ + return err ?: count; \ +} \ + \ +static XE_SRIOV_VF_ATTR(NAME) + +DEFINE_SIMPLE_PROVISIONING_SRIOV_VF_ATTR(exec_quantum_ms, eq, u32, "%u\n"); +DEFINE_SIMPLE_PROVISIONING_SRIOV_VF_ATTR(preempt_timeout_us, pt, u32, "%u\n"); + +static ssize_t xe_sriov_vf_attr_sched_priority_show(struct xe_device *xe, unsigned int vfid, + char *buf) +{ + size_t num_priorities = ARRAY_SIZE(sched_priority_names); + u32 priority; + int err; + + err = xe_sriov_pf_provision_query_vf_priority(xe, vfid, &priority); + if (err) + return err; + + if (!sched_priority_high_allowed(vfid)) + num_priorities--; + + xe_assert(xe, priority < num_priorities); + return emit_choice(buf, priority, sched_priority_names, num_priorities); +} + +static ssize_t xe_sriov_vf_attr_sched_priority_store(struct xe_device *xe, unsigned int vfid, + const char *buf, size_t count) +{ + size_t num_priorities = ARRAY_SIZE(sched_priority_names); + int match; + int err; + + if (!sched_priority_change_allowed(vfid)) + return -EOPNOTSUPP; + + if (!sched_priority_high_allowed(vfid)) + num_priorities--; + + match = __sysfs_match_string(sched_priority_names, num_priorities, buf); + if (match < 0) + return -EINVAL; + + err = xe_sriov_pf_provision_apply_vf_priority(xe, vfid, match); + return err ?: count; +} + +static XE_SRIOV_VF_ATTR(sched_priority); + +static struct attribute *profile_vf_attrs[] = { + &xe_sriov_vf_attr_exec_quantum_ms.attr, + &xe_sriov_vf_attr_preempt_timeout_us.attr, + &xe_sriov_vf_attr_sched_priority.attr, + NULL +}; + +static umode_t profile_vf_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct xe_sriov_kobj *vkobj = 
to_xe_sriov_kobj(kobj); + + if (attr == &xe_sriov_vf_attr_sched_priority.attr && + !sched_priority_change_allowed(vkobj->vfid)) + return attr->mode & 0444; + + return attr->mode; +} + +static const struct attribute_group profile_vf_attr_group = { + .name = "profile", + .attrs = profile_vf_attrs, + .is_visible = profile_vf_attr_is_visible, +}; + +#define DEFINE_SIMPLE_CONTROL_SRIOV_VF_ATTR(NAME) \ + \ +static ssize_t xe_sriov_vf_attr_##NAME##_store(struct xe_device *xe, unsigned int vfid, \ + const char *buf, size_t count) \ +{ \ + bool yes; \ + int err; \ + \ + if (!vfid) \ + return -EPERM; \ + \ + err = kstrtobool(buf, &yes); \ + if (err) \ + return err; \ + if (!yes) \ + return count; \ + \ + err = xe_sriov_pf_control_##NAME##_vf(xe, vfid); \ + return err ?: count; \ +} \ + \ +static XE_SRIOV_VF_ATTR_WO(NAME) + +DEFINE_SIMPLE_CONTROL_SRIOV_VF_ATTR(stop); + +static struct attribute *control_vf_attrs[] = { + &xe_sriov_vf_attr_stop.attr, + NULL +}; + +static umode_t control_vf_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj); + + if (vkobj->vfid == PFID) + return 0; + + return attr->mode; +} + +static const struct attribute_group control_vf_attr_group = { + .attrs = control_vf_attrs, + .is_visible = control_vf_attr_is_visible, +}; + +static const struct attribute_group *xe_sriov_vf_attr_groups[] = { + &profile_vf_attr_group, + &control_vf_attr_group, + NULL +}; + +/* no user serviceable parts below */ + +static struct kobject *create_xe_sriov_kobj(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_kobj *vkobj; + + xe_sriov_pf_assert_vfid(xe, vfid); + + vkobj = kzalloc(sizeof(*vkobj), GFP_KERNEL); + if (!vkobj) + return NULL; + + vkobj->xe = xe; + vkobj->vfid = vfid; + return &vkobj->base; +} + +static void release_xe_sriov_kobj(struct kobject *kobj) +{ + struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj); + + kfree(vkobj); +} + +static ssize_t xe_sriov_dev_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct xe_sriov_dev_attr *vattr = to_xe_sriov_dev_attr(attr); + struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj); + struct xe_device *xe = vkobj->xe; + + if (!vattr->show) + return -EPERM; + + return vattr->show(xe, buf); +} + +static ssize_t xe_sriov_dev_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct xe_sriov_dev_attr *vattr = to_xe_sriov_dev_attr(attr); + struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj); + struct xe_device *xe = vkobj->xe; + ssize_t ret; + + if (!vattr->store) + return -EPERM; + + xe_pm_runtime_get(xe); + ret = xe_sriov_pf_wait_ready(xe) ?: vattr->store(xe, buf, count); + xe_pm_runtime_put(xe); + + return ret; +} + +static ssize_t xe_sriov_vf_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct xe_sriov_vf_attr *vattr = to_xe_sriov_vf_attr(attr); + struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj); + struct xe_device *xe = vkobj->xe; + unsigned int vfid = vkobj->vfid; + + xe_sriov_pf_assert_vfid(xe, vfid); + + if (!vattr->show) + return -EPERM; + + return vattr->show(xe, vfid, buf); +} + +static ssize_t xe_sriov_vf_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct xe_sriov_vf_attr *vattr = to_xe_sriov_vf_attr(attr); + struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj); + struct xe_device *xe = vkobj->xe; + unsigned int vfid = vkobj->vfid; + ssize_t ret; + + xe_sriov_pf_assert_vfid(xe, vfid); + + if 
(!vattr->store) + return -EPERM; + + xe_pm_runtime_get(xe); + ret = xe_sriov_pf_wait_ready(xe) ?: vattr->store(xe, vfid, buf, count); + xe_pm_runtime_put(xe); + + return ret; +} + +static const struct sysfs_ops xe_sriov_dev_sysfs_ops = { + .show = xe_sriov_dev_attr_show, + .store = xe_sriov_dev_attr_store, +}; + +static const struct sysfs_ops xe_sriov_vf_sysfs_ops = { + .show = xe_sriov_vf_attr_show, + .store = xe_sriov_vf_attr_store, +}; + +static const struct kobj_type xe_sriov_dev_ktype = { + .release = release_xe_sriov_kobj, + .sysfs_ops = &xe_sriov_dev_sysfs_ops, + .default_groups = xe_sriov_dev_attr_groups, +}; + +static const struct kobj_type xe_sriov_vf_ktype = { + .release = release_xe_sriov_kobj, + .sysfs_ops = &xe_sriov_vf_sysfs_ops, + .default_groups = xe_sriov_vf_attr_groups, +}; + +static int pf_sysfs_error(struct xe_device *xe, int err, const char *what) +{ + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) + xe_sriov_dbg(xe, "Failed to setup sysfs %s (%pe)\n", what, ERR_PTR(err)); + return err; +} + +static void pf_sysfs_note(struct xe_device *xe, int err, const char *what) +{ + xe_sriov_dbg(xe, "Failed to setup sysfs %s (%pe)\n", what, ERR_PTR(err)); +} + +static void action_put_kobject(void *arg) +{ + struct kobject *kobj = arg; + + kobject_put(kobj); +} + +static int pf_setup_root(struct xe_device *xe) +{ + struct kobject *parent = &xe->drm.dev->kobj; + struct kobject *root; + int err; + + root = create_xe_sriov_kobj(xe, PFID); + if (!root) + return pf_sysfs_error(xe, -ENOMEM, "root obj"); + + err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, root); + if (err) + return pf_sysfs_error(xe, err, "root action"); + + err = kobject_init_and_add(root, &xe_sriov_dev_ktype, parent, "sriov_admin"); + if (err) + return pf_sysfs_error(xe, err, "root init"); + + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, !xe->sriov.pf.sysfs.root); + xe->sriov.pf.sysfs.root = root; + return 0; +} + +static int pf_setup_tree(struct xe_device *xe) +{ + unsigned int totalvfs = xe_sriov_pf_get_totalvfs(xe); + struct kobject *root, *kobj; + unsigned int n; + int err; + + xe_assert(xe, IS_SRIOV_PF(xe)); + root = xe->sriov.pf.sysfs.root; + + for (n = 0; n <= totalvfs; n++) { + kobj = create_xe_sriov_kobj(xe, VFID(n)); + if (!kobj) + return pf_sysfs_error(xe, -ENOMEM, "tree obj"); + + err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, kobj); + if (err) + return pf_sysfs_error(xe, err, "tree action"); + + if (n) + err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype, + root, "vf%u", n); + else + err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype, + root, "pf"); + if (err) + return pf_sysfs_error(xe, err, "tree init"); + + xe_assert(xe, !xe->sriov.pf.vfs[n].kobj); + xe->sriov.pf.vfs[n].kobj = kobj; + } + + return 0; +} + +static void action_rm_device_link(void *arg) +{ + struct kobject *kobj = arg; + + sysfs_remove_link(kobj, "device"); +} + +static int pf_link_pf_device(struct xe_device *xe) +{ + struct kobject *kobj = xe->sriov.pf.vfs[PFID].kobj; + int err; + + err = sysfs_create_link(kobj, &xe->drm.dev->kobj, "device"); + if (err) + return pf_sysfs_error(xe, err, "PF device link"); + + err = devm_add_action_or_reset(xe->drm.dev, action_rm_device_link, kobj); + if (err) + return pf_sysfs_error(xe, err, "PF unlink action"); + + return 0; +} + +/** + * xe_sriov_pf_sysfs_init() - Setup PF's SR-IOV sysfs tree. 
+ * @xe: the PF &xe_device to setup sysfs + * + * This function will create additional nodes that will represent PF and VFs + * devices, each populated with SR-IOV Xe specific attributes. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_sysfs_init(struct xe_device *xe) +{ + int err; + + err = pf_setup_root(xe); + if (err) + return err; + + err = pf_setup_tree(xe); + if (err) + return err; + + err = pf_link_pf_device(xe); + if (err) + return err; + + return 0; +} + +/** + * xe_sriov_pf_sysfs_link_vfs() - Add VF's links in SR-IOV sysfs tree. + * @xe: the &xe_device where to update sysfs + * @num_vfs: number of enabled VFs to link + * + * This function is specific for the PF driver. + * + * This function will add symbolic links between VFs represented in the SR-IOV + * sysfs tree maintained by the PF and enabled VF PCI devices. + * + * The @xe_sriov_pf_sysfs_unlink_vfs() shall be used to remove those links. + */ +void xe_sriov_pf_sysfs_link_vfs(struct xe_device *xe, unsigned int num_vfs) +{ + unsigned int totalvfs = xe_sriov_pf_get_totalvfs(xe); + struct pci_dev *pf_pdev = to_pci_dev(xe->drm.dev); + struct pci_dev *vf_pdev = NULL; + unsigned int n; + int err; + + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, num_vfs <= totalvfs); + + for (n = 1; n <= num_vfs; n++) { + vf_pdev = xe_pci_sriov_get_vf_pdev(pf_pdev, VFID(n)); + if (!vf_pdev) + return pf_sysfs_note(xe, -ENOENT, "VF link"); + + err = sysfs_create_link(xe->sriov.pf.vfs[VFID(n)].kobj, + &vf_pdev->dev.kobj, "device"); + + /* must balance xe_pci_sriov_get_vf_pdev() */ + pci_dev_put(vf_pdev); + + if (err) + return pf_sysfs_note(xe, err, "VF link"); + } +} + +/** + * xe_sriov_pf_sysfs_unlink_vfs() - Remove VF's links from SR-IOV sysfs tree. + * @xe: the &xe_device where to update sysfs + * @num_vfs: number of VFs to unlink + * + * This function shall be called only on the PF. + * This function will remove "device" links added by @xe_sriov_sysfs_link_vfs(). + */ +void xe_sriov_pf_sysfs_unlink_vfs(struct xe_device *xe, unsigned int num_vfs) +{ + unsigned int n; + + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, num_vfs <= xe_sriov_pf_get_totalvfs(xe)); + + for (n = 1; n <= num_vfs; n++) + sysfs_remove_link(xe->sriov.pf.vfs[VFID(n)].kobj, "device"); +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h new file mode 100644 index 000000000000..ae92ed1766e7 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_SYSFS_H_ +#define _XE_SRIOV_PF_SYSFS_H_ + +struct xe_device; + +int xe_sriov_pf_sysfs_init(struct xe_device *xe); + +void xe_sriov_pf_sysfs_link_vfs(struct xe_device *xe, unsigned int num_vfs); +void xe_sriov_pf_sysfs_unlink_vfs(struct xe_device *xe, unsigned int num_vfs); + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h index 956a88f9f213..b0253e1ae5da 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h @@ -9,14 +9,24 @@ #include <linux/mutex.h> #include <linux/types.h> +#include "xe_guard.h" +#include "xe_sriov_pf_migration_types.h" +#include "xe_sriov_pf_provision_types.h" #include "xe_sriov_pf_service_types.h" +struct kobject; + /** * struct xe_sriov_metadata - per-VF device level metadata */ struct xe_sriov_metadata { + /** @kobj: kobject representing VF in PF's SR-IOV sysfs tree. 
*/ + struct kobject *kobj; + /** @version: negotiated VF/PF ABI version */ struct xe_sriov_pf_service_version version; + /** @migration: migration state */ + struct xe_sriov_migration_state migration; }; /** @@ -32,12 +42,27 @@ struct xe_device_pf { /** @driver_max_vfs: Maximum number of VFs supported by the driver. */ u16 driver_max_vfs; + /** @guard_vfs_enabling: guards VFs enabling */ + struct xe_guard guard_vfs_enabling; + /** @master_lock: protects all VFs configurations across GTs */ struct mutex master_lock; + /** @provision: device level provisioning data. */ + struct xe_sriov_pf_provision provision; + + /** @migration: device level migration data. */ + struct xe_sriov_pf_migration migration; + /** @service: device level service data. */ struct xe_sriov_pf_service service; + /** @sysfs: device level sysfs data. */ + struct { + /** @sysfs.root: the root kobject for all SR-IOV entries in sysfs. */ + struct kobject *root; + } sysfs; + /** @vfs: metadata for all VFs. */ struct xe_sriov_metadata *vfs; }; diff --git a/drivers/gpu/drm/xe/xe_sriov_printk.h b/drivers/gpu/drm/xe/xe_sriov_printk.h index 117e1d541692..4c6b5c3d2190 100644 --- a/drivers/gpu/drm/xe/xe_sriov_printk.h +++ b/drivers/gpu/drm/xe/xe_sriov_printk.h @@ -1,22 +1,22 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright © 2023 Intel Corporation + * Copyright © 2023-2025 Intel Corporation */ #ifndef _XE_SRIOV_PRINTK_H_ #define _XE_SRIOV_PRINTK_H_ -#include <drm/drm_print.h> - -#include "xe_device_types.h" -#include "xe_sriov_types.h" +#include "xe_printk.h" #define xe_sriov_printk_prefix(xe) \ ((xe)->sriov.__mode == XE_SRIOV_MODE_PF ? "PF: " : \ (xe)->sriov.__mode == XE_SRIOV_MODE_VF ? "VF: " : "") +#define __XE_SRIOV_PRINTK_FMT(_xe, _fmt, _args...) \ + "%s" _fmt, xe_sriov_printk_prefix(_xe), ##_args + #define xe_sriov_printk(xe, _level, fmt, ...) \ - drm_##_level(&(xe)->drm, "%s" fmt, xe_sriov_printk_prefix(xe), ##__VA_ARGS__) + xe_##_level((xe), __XE_SRIOV_PRINTK_FMT((xe), fmt, ##__VA_ARGS__)) #define xe_sriov_err(xe, fmt, ...) \ xe_sriov_printk((xe), err, fmt, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c index cdd9f8e78b2a..284ce37ca92d 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_sriov_vf.c @@ -6,22 +6,12 @@ #include <drm/drm_debugfs.h> #include <drm/drm_managed.h> -#include "xe_assert.h" -#include "xe_device.h" #include "xe_gt.h" -#include "xe_gt_sriov_printk.h" #include "xe_gt_sriov_vf.h" #include "xe_guc.h" -#include "xe_guc_ct.h" -#include "xe_guc_submit.h" -#include "xe_irq.h" -#include "xe_lrc.h" -#include "xe_pm.h" -#include "xe_sriov.h" #include "xe_sriov_printk.h" #include "xe_sriov_vf.h" #include "xe_sriov_vf_ccs.h" -#include "xe_tile_sriov_vf.h" /** * DOC: VF restore procedure in PF KMD and VF KMD @@ -140,10 +130,15 @@ bool xe_sriov_vf_migration_supported(struct xe_device *xe) { xe_assert(xe, IS_SRIOV_VF(xe)); - return xe->sriov.vf.migration.enabled; + return !xe->sriov.vf.migration.disabled; } -static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...) +/** + * xe_sriov_vf_migration_disable - Turn off VF migration with given log message. + * @xe: the &xe_device instance. + * @fmt: format string for the log message, to be combined with following VAs. + */ +void xe_sriov_vf_migration_disable(struct xe_device *xe, const char *fmt, ...) { struct va_format vaf; va_list va_args; @@ -156,39 +151,14 @@ static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...) 
xe_sriov_notice(xe, "migration disabled: %pV\n", &vaf); va_end(va_args); - xe->sriov.vf.migration.enabled = false; + xe->sriov.vf.migration.disabled = true; } -static void migration_worker_func(struct work_struct *w); - static void vf_migration_init_early(struct xe_device *xe) { - /* - * TODO: Add conditions to allow specific platforms, when they're - * supported at production quality. - */ - if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG)) - return vf_disable_migration(xe, - "experimental feature not available on production builds"); - - if (GRAPHICS_VER(xe) < 20) - return vf_disable_migration(xe, "requires gfx version >= 20, but only %u found", - GRAPHICS_VER(xe)); - - if (!IS_DGFX(xe)) { - struct xe_uc_fw_version guc_version; - - xe_gt_sriov_vf_guc_versions(xe_device_get_gt(xe, 0), NULL, &guc_version); - if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 23, 0)) - return vf_disable_migration(xe, - "CCS migration requires GuC ABI >= 1.23 but only %u.%u found", - guc_version.major, guc_version.minor); - } + if (!xe_device_has_memirq(xe)) + return xe_sriov_vf_migration_disable(xe, "requires memory-based IRQ support"); - INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func); - - xe->sriov.vf.migration.enabled = true; - xe_sriov_dbg(xe, "migration support enabled\n"); } /** @@ -201,235 +171,6 @@ void xe_sriov_vf_init_early(struct xe_device *xe) } /** - * vf_post_migration_shutdown - Stop the driver activities after VF migration. - * @xe: the &xe_device struct instance - * - * After this VM is migrated and assigned to a new VF, it is running on a new - * hardware, and therefore many hardware-dependent states and related structures - * require fixups. Without fixups, the hardware cannot do any work, and therefore - * all GPU pipelines are stalled. - * Stop some of kernel activities to make the fixup process faster. - */ -static void vf_post_migration_shutdown(struct xe_device *xe) -{ - struct xe_gt *gt; - unsigned int id; - int ret = 0; - - for_each_gt(gt, xe, id) { - xe_guc_submit_pause(>->uc.guc); - ret |= xe_guc_submit_reset_block(>->uc.guc); - } - - if (ret) - drm_info(&xe->drm, "migration recovery encountered ongoing reset\n"); -} - -/** - * vf_post_migration_kickstart - Re-start the driver activities under new hardware. - * @xe: the &xe_device struct instance - * - * After we have finished with all post-migration fixups, restart the driver - * activities to continue feeding the GPU with workloads. - */ -static void vf_post_migration_kickstart(struct xe_device *xe) -{ - struct xe_gt *gt; - unsigned int id; - - /* - * Make sure interrupts on the new HW are properly set. The GuC IRQ - * must be working at this point, since the recovery did started, - * but the rest was not enabled using the procedure from spec. - */ - xe_irq_resume(xe); - - for_each_gt(gt, xe, id) { - xe_guc_submit_reset_unblock(>->uc.guc); - xe_guc_submit_unpause(>->uc.guc); - } -} - -static bool gt_vf_post_migration_needed(struct xe_gt *gt) -{ - return test_bit(gt->info.id, >_to_xe(gt)->sriov.vf.migration.gt_flags); -} - -/* - * Notify GuCs marked in flags about resource fixups apply finished. 
- * @xe: the &xe_device struct instance - * @gt_flags: flags marking to which GTs the notification shall be sent - */ -static int vf_post_migration_notify_resfix_done(struct xe_device *xe, unsigned long gt_flags) -{ - struct xe_gt *gt; - unsigned int id; - int err = 0; - - for_each_gt(gt, xe, id) { - if (!test_bit(id, >_flags)) - continue; - /* skip asking GuC for RESFIX exit if new recovery request arrived */ - if (gt_vf_post_migration_needed(gt)) - continue; - err = xe_gt_sriov_vf_notify_resfix_done(gt); - if (err) - break; - clear_bit(id, >_flags); - } - - if (gt_flags && !err) - drm_dbg(&xe->drm, "another recovery imminent, skipped some notifications\n"); - return err; -} - -static int vf_get_next_migrated_gt_id(struct xe_device *xe) -{ - struct xe_gt *gt; - unsigned int id; - - for_each_gt(gt, xe, id) { - if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags)) - return id; - } - return -1; -} - -static size_t post_migration_scratch_size(struct xe_device *xe) -{ - return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE); -} - -/** - * Perform post-migration fixups on a single GT. - * - * After migration, GuC needs to be re-queried for VF configuration to check - * if it matches previous provisioning. Most of VF provisioning shall be the - * same, except GGTT range, since GGTT is not virtualized per-VF. If GGTT - * range has changed, we have to perform fixups - shift all GGTT references - * used anywhere within the driver. After the fixups in this function succeed, - * it is allowed to ask the GuC bound to this GT to continue normal operation. - * - * Returns: 0 if the operation completed successfully, or a negative error - * code otherwise. - */ -static int gt_vf_post_migration_fixups(struct xe_gt *gt) -{ - s64 shift; - void *buf; - int err; - - buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_KERNEL); - if (!buf) - return -ENOMEM; - - err = xe_gt_sriov_vf_query_config(gt); - if (err) - goto out; - - shift = xe_gt_sriov_vf_ggtt_shift(gt); - if (shift) { - xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift); - xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt); - err = xe_guc_contexts_hwsp_rebase(>->uc.guc, buf); - if (err) - goto out; - xe_guc_jobs_ring_rebase(>->uc.guc); - xe_guc_ct_fixup_messages_with_ggtt(>->uc.guc.ct, shift); - } - -out: - kfree(buf); - return err; -} - -static void vf_post_migration_recovery(struct xe_device *xe) -{ - unsigned long fixed_gts = 0; - int id, err; - - drm_dbg(&xe->drm, "migration recovery in progress\n"); - xe_pm_runtime_get(xe); - vf_post_migration_shutdown(xe); - - if (!xe_sriov_vf_migration_supported(xe)) { - xe_sriov_err(xe, "migration is not supported\n"); - err = -ENOTRECOVERABLE; - goto fail; - } - - while (id = vf_get_next_migrated_gt_id(xe), id >= 0) { - struct xe_gt *gt = xe_device_get_gt(xe, id); - - err = gt_vf_post_migration_fixups(gt); - if (err) - goto fail; - - set_bit(id, &fixed_gts); - } - - vf_post_migration_kickstart(xe); - err = vf_post_migration_notify_resfix_done(xe, fixed_gts); - if (err) - goto fail; - - xe_pm_runtime_put(xe); - drm_notice(&xe->drm, "migration recovery ended\n"); - return; -fail: - xe_pm_runtime_put(xe); - drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err)); - xe_device_declare_wedged(xe); -} - -static void migration_worker_func(struct work_struct *w) -{ - struct xe_device *xe = container_of(w, struct xe_device, - sriov.vf.migration.worker); - - vf_post_migration_recovery(xe); -} - -/* - * Check if post-restore recovery is coming on any of GTs. 
- * @xe: the &xe_device struct instance - * - * Return: True if migration recovery worker will soon be running. Any worker currently - * executing does not affect the result. - */ -static bool vf_ready_to_recovery_on_any_gts(struct xe_device *xe) -{ - struct xe_gt *gt; - unsigned int id; - - for_each_gt(gt, xe, id) { - if (test_bit(id, &xe->sriov.vf.migration.gt_flags)) - return true; - } - return false; -} - -/** - * xe_sriov_vf_start_migration_recovery - Start VF migration recovery. - * @xe: the &xe_device to start recovery on - * - * This function shall be called only by VF. - */ -void xe_sriov_vf_start_migration_recovery(struct xe_device *xe) -{ - bool started; - - xe_assert(xe, IS_SRIOV_VF(xe)); - - if (!vf_ready_to_recovery_on_any_gts(xe)) - return; - - started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker); - drm_info(&xe->drm, "VF migration recovery %s\n", started ? - "scheduled" : "already in progress"); -} - -/** * xe_sriov_vf_init_late() - SR-IOV VF late initialization functions. * @xe: the &xe_device to initialize * @@ -439,12 +180,7 @@ void xe_sriov_vf_start_migration_recovery(struct xe_device *xe) */ int xe_sriov_vf_init_late(struct xe_device *xe) { - int err = 0; - - if (xe_sriov_vf_migration_supported(xe)) - err = xe_sriov_vf_ccs_init(xe); - - return err; + return xe_sriov_vf_ccs_init(xe); } static int sa_info_vf_ccs(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h index 9e752105ec2a..e967d4166a43 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_sriov_vf.h @@ -13,8 +13,8 @@ struct xe_device; void xe_sriov_vf_init_early(struct xe_device *xe); int xe_sriov_vf_init_late(struct xe_device *xe); -void xe_sriov_vf_start_migration_recovery(struct xe_device *xe); bool xe_sriov_vf_migration_supported(struct xe_device *xe); +void xe_sriov_vf_migration_disable(struct xe_device *xe, const char *fmt, ...); void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root); #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c index 8dec616c37c9..797a4b866226 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c @@ -10,6 +10,8 @@ #include "xe_device.h" #include "xe_exec_queue.h" #include "xe_exec_queue_types.h" +#include "xe_gt_sriov_vf.h" +#include "xe_guc.h" #include "xe_guc_submit.h" #include "xe_lrc.h" #include "xe_migrate.h" @@ -175,6 +177,15 @@ static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx) struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q); u32 dw[10], i = 0; + /* + * XXX: Save/restore fixes — for some reason, the GuC only accepts the + * save/restore context if the LRC head pointer is zero. This is evident + * from repeated VF migrations failing when the LRC head pointer is + * non-zero. + */ + lrc->ring.tail = 0; + xe_lrc_set_ring_head(lrc, 0); + dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE; dw[i++] = MI_BATCH_BUFFER_START | XE_INSTR_NUM_DW(3); dw[i++] = lower_32_bits(addr); @@ -186,6 +197,25 @@ static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx) xe_lrc_set_ring_tail(lrc, lrc->ring.tail); } +/** + * xe_sriov_vf_ccs_rebase - Rebase GGTT addresses for CCS save / restore + * @xe: the &xe_device. 
+ */ +void xe_sriov_vf_ccs_rebase(struct xe_device *xe) +{ + enum xe_sriov_vf_ccs_rw_ctxs ctx_id; + + if (!IS_VF_CCS_READY(xe)) + return; + + for_each_ccs_rw_ctx(ctx_id) { + struct xe_sriov_vf_ccs_ctx *ctx = + &xe->sriov.vf.ccs.contexts[ctx_id]; + + ccs_rw_update_ring(ctx); + } +} + static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx) { int ctx_type; @@ -232,6 +262,45 @@ int xe_sriov_vf_ccs_register_context(struct xe_device *xe) return err; } +/* + * Whether GuC requires CCS copy BBs for VF migration. + * @xe: the &xe_device instance. + * + * Only selected platforms require VF KMD to maintain CCS copy BBs and linked LRCAs. + * + * Return: true if VF driver must participate in the CCS migration, false otherwise. + */ +static bool vf_migration_ccs_bb_needed(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_VF(xe)); + + return !IS_DGFX(xe) && xe_device_has_flat_ccs(xe); +} + +/* + * Check for disable migration due to no CCS BBs support in GuC FW. + * @xe: the &xe_device instance. + * + * Performs late disable of VF migration feature in case GuC FW cannot support it. + * + * Returns: True if VF migration with CCS BBs is supported, false otherwise. + */ +static bool vf_migration_ccs_bb_support_check(struct xe_device *xe) +{ + struct xe_gt *gt = xe_root_mmio_gt(xe); + struct xe_uc_fw_version guc_version; + + xe_gt_sriov_vf_guc_versions(gt, NULL, &guc_version); + if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 23, 0)) { + xe_sriov_vf_migration_disable(xe, + "CCS migration requires GuC ABI >= 1.23 but only %u.%u found", + guc_version.major, guc_version.minor); + return false; + } + + return true; +} + static void xe_sriov_vf_ccs_fini(void *arg) { struct xe_sriov_vf_ccs_ctx *ctx = arg; @@ -264,9 +333,10 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe) int err; xe_assert(xe, IS_SRIOV_VF(xe)); - xe_assert(xe, xe_sriov_vf_migration_supported(xe)); - if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)) + if (!xe_sriov_vf_migration_supported(xe) || + !vf_migration_ccs_bb_needed(xe) || + !vf_migration_ccs_bb_support_check(xe)) return 0; for_each_ccs_rw_ctx(ctx_id) { diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h index 0745c0ff0228..f8ca6efce9ee 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h @@ -18,6 +18,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe); int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo); int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo); int xe_sriov_vf_ccs_register_context(struct xe_device *xe); +void xe_sriov_vf_ccs_rebase(struct xe_device *xe); void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p); static inline bool xe_sriov_vf_ccs_ready(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h index 426cc5841958..d5f72d667817 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h @@ -33,15 +33,11 @@ struct xe_device_vf { /** @migration: VF Migration state data */ struct { - /** @migration.worker: VF migration recovery worker */ - struct work_struct worker; - /** @migration.gt_flags: Per-GT request flags for VF migration recovery */ - unsigned long gt_flags; /** - * @migration.enabled: flag indicating if migration support - * was enabled or not due to missing prerequisites + * @migration.disabled: flag indicating if migration support + * was turned off due to missing prerequisites */ - bool enabled; + bool disabled; } migration; /** @ccs: VF CCS state data */ 
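The vf_migration_ccs_bb_support_check() hunk above gates VF migration on the GuC firmware interface level by collapsing a (major, minor, patch) version triple into one integer and comparing it against MAKE_GUC_VER(1, 23, 0). Below is a minimal stand-alone sketch of that version-packing idiom; the shift widths and helper names are illustrative assumptions, not the actual definitions from xe_guc.h.

#include <stdio.h>

/* Hypothetical stand-ins for MAKE_GUC_VER()/MAKE_GUC_VER_STRUCT(): pack
 * major/minor/patch into a single integer so firmware levels can be
 * compared with an ordinary '<'. */
#define PACK_VER(major, minor, patch)	(((major) << 16) | ((minor) << 8) | (patch))
#define PACK_VER_STRUCT(v)		PACK_VER((v).major, (v).minor, (v).patch)

struct fw_version {
	unsigned int major, minor, patch;
};

int main(void)
{
	struct fw_version found = { .major = 1, .minor = 22, .patch = 3 };

	/* Same shape as the gate in vf_migration_ccs_bb_support_check(). */
	if (PACK_VER_STRUCT(found) < PACK_VER(1, 23, 0))
		printf("CCS migration needs GuC ABI >= 1.23, found %u.%u\n",
		       found.major, found.minor);

	return 0;
}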
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 129e7818565c..55c5a0eb82e1 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -104,8 +104,7 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range, &vm->svm.garbage_collector.range_list); spin_unlock(&vm->svm.garbage_collector.lock); - queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq, - &vm->svm.garbage_collector.work); + queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work); } static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt) @@ -633,7 +632,7 @@ err_out: /* * XXX: We can't derive the GT here (or anywhere in this functions, but - * compute always uses the primary GT so accumlate stats on the likely + * compute always uses the primary GT so accumulate stats on the likely * GT of the fault. */ if (gt) diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index 82872a51f098..ff74528ca0c6 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -14,7 +14,7 @@ #include <drm/drm_syncobj.h> #include <uapi/drm/xe_drm.h> -#include "xe_device_types.h" +#include "xe_device.h" #include "xe_exec_queue.h" #include "xe_macros.h" #include "xe_sched_job_types.h" @@ -113,6 +113,8 @@ static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, struct xe_sync_entry *sync, struct drm_xe_sync __user *sync_user, + struct drm_syncobj *ufence_syncobj, + u64 ufence_timeline_value, unsigned int flags) { struct drm_xe_sync sync_in; @@ -192,10 +194,15 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, if (exec) { sync->addr = sync_in.addr; } else { + sync->ufence_timeline_value = ufence_timeline_value; sync->ufence = user_fence_create(xe, sync_in.addr, sync_in.timeline_value); if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence))) return PTR_ERR(sync->ufence); + sync->ufence_chain_fence = dma_fence_chain_alloc(); + if (!sync->ufence_chain_fence) + return -ENOMEM; + sync->ufence_syncobj = ufence_syncobj; } break; @@ -239,7 +246,12 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence) } else if (sync->ufence) { int err; - dma_fence_get(fence); + drm_syncobj_add_point(sync->ufence_syncobj, + sync->ufence_chain_fence, + fence, sync->ufence_timeline_value); + sync->ufence_chain_fence = NULL; + + fence = drm_syncobj_fence_get(sync->ufence_syncobj); user_fence_get(sync->ufence); err = dma_fence_add_callback(fence, &sync->ufence->cb, user_fence_cb); @@ -259,7 +271,8 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync) drm_syncobj_put(sync->syncobj); dma_fence_put(sync->fence); dma_fence_chain_free(sync->chain_fence); - if (sync->ufence) + dma_fence_chain_free(sync->ufence_chain_fence); + if (!IS_ERR_OR_NULL(sync->ufence)) user_fence_put(sync->ufence); } @@ -284,51 +297,59 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, struct dma_fence **fences = NULL; struct dma_fence_array *cf = NULL; struct dma_fence *fence; - int i, num_in_fence = 0, current_fence = 0; + int i, num_fence = 0, current_fence = 0; lockdep_assert_held(&vm->lock); - /* Count in-fences */ - for (i = 0; i < num_sync; ++i) { - if (sync[i].fence) { - ++num_in_fence; - fence = sync[i].fence; + /* Reject in fences */ + for (i = 0; i < num_sync; ++i) + if (sync[i].fence) + return ERR_PTR(-EOPNOTSUPP); + + if (q->flags & EXEC_QUEUE_FLAG_VM) { + struct xe_exec_queue *__q; + struct xe_tile *tile; + u8 id; + + for_each_tile(tile, 
vm->xe, id) + num_fence += (1 + XE_MAX_GT_PER_TILE); + + fences = kmalloc_array(num_fence, sizeof(*fences), + GFP_KERNEL); + if (!fences) + return ERR_PTR(-ENOMEM); + + fences[current_fence++] = + xe_exec_queue_last_fence_get(q, vm); + for_each_tlb_inval(i) + fences[current_fence++] = + xe_exec_queue_tlb_inval_last_fence_get(q, vm, i); + list_for_each_entry(__q, &q->multi_gt_list, + multi_gt_link) { + fences[current_fence++] = + xe_exec_queue_last_fence_get(__q, vm); + for_each_tlb_inval(i) + fences[current_fence++] = + xe_exec_queue_tlb_inval_last_fence_get(__q, vm, i); } - } - /* Easy case... */ - if (!num_in_fence) { - fence = xe_exec_queue_last_fence_get(q, vm); - return fence; - } + xe_assert(vm->xe, current_fence == num_fence); + cf = dma_fence_array_create(num_fence, fences, + dma_fence_context_alloc(1), + 1, false); + if (!cf) + goto err_out; - /* Create composite fence */ - fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); - for (i = 0; i < num_sync; ++i) { - if (sync[i].fence) { - dma_fence_get(sync[i].fence); - fences[current_fence++] = sync[i].fence; - } - } - fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm); - cf = dma_fence_array_create(num_in_fence, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - if (!cf) { - --vm->composite_fence_seqno; - goto err_out; + return &cf->base; } - return &cf->base; + fence = xe_exec_queue_last_fence_get(q, vm); + return fence; err_out: while (current_fence) dma_fence_put(fences[--current_fence]); kfree(fences); - kfree(cf); return ERR_PTR(-ENOMEM); } diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h index 256ffc1e54dc..51f2d803e977 100644 --- a/drivers/gpu/drm/xe/xe_sync.h +++ b/drivers/gpu/drm/xe/xe_sync.h @@ -8,6 +8,7 @@ #include "xe_sync_types.h" +struct drm_syncobj; struct xe_device; struct xe_exec_queue; struct xe_file; @@ -21,6 +22,8 @@ struct xe_vm; int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, struct xe_sync_entry *sync, struct drm_xe_sync __user *sync_user, + struct drm_syncobj *ufence_syncobj, + u64 ufence_timeline_value, unsigned int flags); int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job); diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h index 30ac3f51993b..b88f1833e28c 100644 --- a/drivers/gpu/drm/xe/xe_sync_types.h +++ b/drivers/gpu/drm/xe/xe_sync_types.h @@ -18,9 +18,12 @@ struct xe_sync_entry { struct drm_syncobj *syncobj; struct dma_fence *fence; struct dma_fence_chain *chain_fence; + struct dma_fence_chain *ufence_chain_fence; + struct drm_syncobj *ufence_syncobj; struct xe_user_fence *ufence; u64 addr; u64 timeline_value; + u64 ufence_timeline_value; u32 type; u32 flags; }; diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c index d49ba3401963..4f4f9a5c43af 100644 --- a/drivers/gpu/drm/xe/xe_tile.c +++ b/drivers/gpu/drm/xe/xe_tile.c @@ -19,9 +19,9 @@ #include "xe_tile.h" #include "xe_tile_sysfs.h" #include "xe_ttm_vram_mgr.h" -#include "xe_wa.h" #include "xe_vram.h" #include "xe_vram_types.h" +#include "xe_wa.h" /** * DOC: Multi-tile Design @@ -124,6 +124,14 @@ int xe_tile_alloc_vram(struct xe_tile *tile) return -ENOMEM; tile->mem.vram = vram; + /* + * If the kernel_vram is not already allocated, + * it means that tile has common VRAM region for + * kernel and user space. 
+ */ + if (!tile->mem.kernel_vram) + tile->mem.kernel_vram = tile->mem.vram; + return 0; } @@ -149,10 +157,6 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id) if (err) return err; - tile->primary_gt = xe_gt_alloc(tile); - if (IS_ERR(tile->primary_gt)) - return PTR_ERR(tile->primary_gt); - xe_pcode_init(tile); return 0; diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.c b/drivers/gpu/drm/xe/xe_tile_debugfs.c index 5523874cba7b..fff242a5ae56 100644 --- a/drivers/gpu/drm/xe/xe_tile_debugfs.c +++ b/drivers/gpu/drm/xe/xe_tile_debugfs.c @@ -6,6 +6,7 @@ #include <linux/debugfs.h> #include <drm/drm_debugfs.h> +#include "xe_ggtt.h" #include "xe_pm.h" #include "xe_sa.h" #include "xe_tile_debugfs.h" @@ -16,7 +17,7 @@ static struct xe_tile *node_to_tile(struct drm_info_node *node) } /** - * tile_debugfs_simple_show - A show callback for struct drm_info_list + * xe_tile_debugfs_simple_show() - A show callback for struct drm_info_list * @m: the &seq_file * @data: data used by the drm debugfs helpers * @@ -57,7 +58,7 @@ static struct xe_tile *node_to_tile(struct drm_info_node *node) * * Return: 0 on success or a negative error code on failure. */ -static int tile_debugfs_simple_show(struct seq_file *m, void *data) +int xe_tile_debugfs_simple_show(struct seq_file *m, void *data) { struct drm_printer p = drm_seq_file_printer(m); struct drm_info_node *node = m->private; @@ -68,7 +69,7 @@ static int tile_debugfs_simple_show(struct seq_file *m, void *data) } /** - * tile_debugfs_show_with_rpm - A show callback for struct drm_info_list + * xe_tile_debugfs_show_with_rpm() - A show callback for struct drm_info_list * @m: the &seq_file * @data: data used by the drm debugfs helpers * @@ -76,7 +77,7 @@ static int tile_debugfs_simple_show(struct seq_file *m, void *data) * * Return: 0 on success or a negative error code on failure. 
*/ -static int tile_debugfs_show_with_rpm(struct seq_file *m, void *data) +int xe_tile_debugfs_show_with_rpm(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct xe_tile *tile = node_to_tile(node); @@ -84,12 +85,17 @@ static int tile_debugfs_show_with_rpm(struct seq_file *m, void *data) int ret; xe_pm_runtime_get(xe); - ret = tile_debugfs_simple_show(m, data); + ret = xe_tile_debugfs_simple_show(m, data); xe_pm_runtime_put(xe); return ret; } +static int ggtt(struct xe_tile *tile, struct drm_printer *p) +{ + return xe_ggtt_dump(tile->mem.ggtt, p); +} + static int sa_info(struct xe_tile *tile, struct drm_printer *p) { drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p, @@ -100,7 +106,8 @@ static int sa_info(struct xe_tile *tile, struct drm_printer *p) /* only for debugfs files which can be safely used on the VF */ static const struct drm_info_list vf_safe_debugfs_list[] = { - { "sa_info", .show = tile_debugfs_show_with_rpm, .data = sa_info }, + { "ggtt", .show = xe_tile_debugfs_show_with_rpm, .data = ggtt }, + { "sa_info", .show = xe_tile_debugfs_show_with_rpm, .data = sa_info }, }; /** diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.h b/drivers/gpu/drm/xe/xe_tile_debugfs.h index 0e5f724de37f..4429c22542f4 100644 --- a/drivers/gpu/drm/xe/xe_tile_debugfs.h +++ b/drivers/gpu/drm/xe/xe_tile_debugfs.h @@ -6,8 +6,11 @@ #ifndef _XE_TILE_DEBUGFS_H_ #define _XE_TILE_DEBUGFS_H_ +struct seq_file; struct xe_tile; void xe_tile_debugfs_register(struct xe_tile *tile); +int xe_tile_debugfs_simple_show(struct seq_file *m, void *data); +int xe_tile_debugfs_show_with_rpm(struct seq_file *m, void *data); #endif diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c new file mode 100644 index 000000000000..f3f478f14ff5 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <linux/debugfs.h> +#include <drm/drm_debugfs.h> + +#include "xe_device.h" +#include "xe_device_types.h" +#include "xe_gt_sriov_pf_config.h" +#include "xe_gt_sriov_pf_debugfs.h" +#include "xe_pm.h" +#include "xe_tile_debugfs.h" +#include "xe_tile_sriov_pf_debugfs.h" +#include "xe_sriov.h" +#include "xe_sriov_pf.h" +#include "xe_sriov_pf_provision.h" + +/* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov # d_inode->i_private = (xe_device*) + * │ ├── pf # d_inode->i_private = (xe_device*) + * │ │ ├── tile0 # d_inode->i_private = (xe_tile*) + * │ │ ├── tile1 + * │ │ : : + * │ ├── vf1 # d_inode->i_private = VFID(1) + * │ │ ├── tile0 # d_inode->i_private = (xe_tile*) + * │ │ ├── tile1 + * │ │ : : + * │ ├── vfN # d_inode->i_private = VFID(N) + * │ │ ├── tile0 # d_inode->i_private = (xe_tile*) + * │ │ ├── tile1 + * : : : : + */ + +static void *extract_priv(struct dentry *d) +{ + return d->d_inode->i_private; +} + +__maybe_unused +static struct xe_tile *extract_tile(struct dentry *d) +{ + return extract_priv(d); +} + +static struct xe_device *extract_xe(struct dentry *d) +{ + return extract_priv(d->d_parent->d_parent); +} + +__maybe_unused +static unsigned int extract_vfid(struct dentry *d) +{ + void *pp = extract_priv(d->d_parent); + + return pp == extract_xe(d) ? 
PFID : (uintptr_t)pp; +} + +/* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * : ├── pf + * : ├── tile0 + * : ├── ggtt_available + * ├── ggtt_provisioned + */ + +static int pf_config_print_available_ggtt(struct xe_tile *tile, struct drm_printer *p) +{ + return xe_gt_sriov_pf_config_print_available_ggtt(tile->primary_gt, p); +} + +static int pf_config_print_ggtt(struct xe_tile *tile, struct drm_printer *p) +{ + return xe_gt_sriov_pf_config_print_ggtt(tile->primary_gt, p); +} + +static const struct drm_info_list pf_ggtt_info[] = { + { + "ggtt_available", + .show = xe_tile_debugfs_simple_show, + .data = pf_config_print_available_ggtt, + }, + { + "ggtt_provisioned", + .show = xe_tile_debugfs_simple_show, + .data = pf_config_print_ggtt, + }, +}; + +/* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * : ├── pf + * : ├── tile0 + * : ├── vram_provisioned + */ + +static int pf_config_print_vram(struct xe_tile *tile, struct drm_printer *p) +{ + return xe_gt_sriov_pf_config_print_lmem(tile->primary_gt, p); +} + +static const struct drm_info_list pf_vram_info[] = { + { + "vram_provisioned", + .show = xe_tile_debugfs_simple_show, + .data = pf_config_print_vram, + }, +}; + +/* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * │ ├── pf + * │ │ ├── tile0 + * │ │ │ ├── ggtt_spare + * │ │ │ ├── vram_spare + * │ │ ├── tile1 + * │ │ : : + * │ ├── vf1 + * │ : ├── tile0 + * │ │ ├── ggtt_quota + * │ │ ├── vram_quota + * │ ├── tile1 + * │ : : + */ + +#define DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(NAME, CONFIG, TYPE, FORMAT) \ + \ +static int NAME##_set(void *data, u64 val) \ +{ \ + struct xe_tile *tile = extract_tile(data); \ + unsigned int vfid = extract_vfid(data); \ + struct xe_gt *gt = tile->primary_gt; \ + struct xe_device *xe = tile->xe; \ + int err; \ + \ + if (val > (TYPE)~0ull) \ + return -EOVERFLOW; \ + \ + xe_pm_runtime_get(xe); \ + err = xe_sriov_pf_wait_ready(xe) ?: \ + xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \ + if (!err) \ + xe_sriov_pf_provision_set_custom_mode(xe); \ + xe_pm_runtime_put(xe); \ + \ + return err; \ +} \ + \ +static int NAME##_get(void *data, u64 *val) \ +{ \ + struct xe_tile *tile = extract_tile(data); \ + unsigned int vfid = extract_vfid(data); \ + struct xe_gt *gt = tile->primary_gt; \ + \ + *val = xe_gt_sriov_pf_config_get_##CONFIG(gt, vfid); \ + return 0; \ +} \ + \ +DEFINE_DEBUGFS_ATTRIBUTE(NAME##_fops, NAME##_get, NAME##_set, FORMAT) + +DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(ggtt, ggtt, u64, "%llu\n"); +DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(vram, lmem, u64, "%llu\n"); + +static void pf_add_config_attrs(struct xe_tile *tile, struct dentry *dent, unsigned int vfid) +{ + struct xe_device *xe = tile->xe; + + xe_tile_assert(tile, tile == extract_tile(dent)); + xe_tile_assert(tile, vfid == extract_vfid(dent)); + + debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare", + 0644, dent, dent, &ggtt_fops); + if (IS_DGFX(xe)) + debugfs_create_file_unsafe(vfid ? "vram_quota" : "vram_spare", + xe_device_has_lmtt(xe) ? 
0644 : 0444, + dent, dent, &vram_fops); +} + +static void pf_populate_tile(struct xe_tile *tile, struct dentry *dent, unsigned int vfid) +{ + struct xe_device *xe = tile->xe; + struct drm_minor *minor = xe->drm.primary; + struct xe_gt *gt; + unsigned int id; + + pf_add_config_attrs(tile, dent, vfid); + + if (!vfid) { + drm_debugfs_create_files(pf_ggtt_info, + ARRAY_SIZE(pf_ggtt_info), + dent, minor); + if (IS_DGFX(xe)) + drm_debugfs_create_files(pf_vram_info, + ARRAY_SIZE(pf_vram_info), + dent, minor); + } + + for_each_gt_on_tile(gt, tile, id) + xe_gt_sriov_pf_debugfs_populate(gt, dent, vfid); +} + +/** + * xe_tile_sriov_pf_debugfs_populate() - Populate SR-IOV debugfs tree with tile files. + * @tile: the &xe_tile to register + * @parent: the parent &dentry that represents the SR-IOV @vfid function + * @vfid: the VF identifier + * + * Add to the @parent directory new debugfs directory that will represent a @tile and + * populate it with files that are related to the SR-IOV @vfid function. + * + * This function can only be called on PF. + */ +void xe_tile_sriov_pf_debugfs_populate(struct xe_tile *tile, struct dentry *parent, + unsigned int vfid) +{ + struct xe_device *xe = tile->xe; + struct dentry *dent; + char name[10]; /* should be enough up to "tile%u\0" for 2^16 - 1 */ + + xe_tile_assert(tile, IS_SRIOV_PF(xe)); + xe_tile_assert(tile, extract_priv(parent->d_parent) == xe); + xe_tile_assert(tile, extract_priv(parent) == tile->xe || + (uintptr_t)extract_priv(parent) == vfid); + + /* + * /sys/kernel/debug/dri/BDF/ + * ├── sriov + * │ ├── pf # parent, d_inode->i_private = (xe_device*) + * │ │ ├── tile0 # d_inode->i_private = (xe_tile*) + * │ │ ├── tile1 + * │ │ : : + * │ ├── vf1 # parent, d_inode->i_private = VFID(1) + * │ │ ├── tile0 # d_inode->i_private = (xe_tile*) + * │ │ ├── tile1 + * : : : : + */ + snprintf(name, sizeof(name), "tile%u", tile->id); + dent = debugfs_create_dir(name, parent); + if (IS_ERR(dent)) + return; + dent->d_inode->i_private = tile; + + xe_tile_assert(tile, extract_tile(dent) == tile); + xe_tile_assert(tile, extract_vfid(dent) == vfid); + xe_tile_assert(tile, extract_xe(dent) == xe); + + pf_populate_tile(tile, dent, vfid); +} diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h new file mode 100644 index 000000000000..55d179c44634 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_TILE_SRIOV_PF_DEBUGFS_H_ +#define _XE_TILE_SRIOV_PF_DEBUGFS_H_ + +struct dentry; +struct xe_tile; + +void xe_tile_sriov_pf_debugfs_populate(struct xe_tile *tile, struct dentry *parent, + unsigned int vfid); + +#endif diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_printk.h b/drivers/gpu/drm/xe/xe_tile_sriov_printk.h new file mode 100644 index 000000000000..68323512872c --- /dev/null +++ b/drivers/gpu/drm/xe/xe_tile_sriov_printk.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_TILE_SRIOV_PRINTK_H_ +#define _XE_TILE_SRIOV_PRINTK_H_ + +#include "xe_tile_printk.h" +#include "xe_sriov_printk.h" + +#define __XE_TILE_SRIOV_PRINTK_FMT(_tile, _fmt, ...) \ + __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__) + +#define xe_tile_sriov_printk(_tile, _level, _fmt, ...) \ + xe_sriov_##_level((_tile)->xe, __XE_TILE_SRIOV_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__)) + +#define xe_tile_sriov_err(_tile, _fmt, ...) 
\ + xe_tile_sriov_printk(_tile, err, _fmt, ##__VA_ARGS__) + +#define xe_tile_sriov_notice(_tile, _fmt, ...) \ + xe_tile_sriov_printk(_tile, notice, _fmt, ##__VA_ARGS__) + +#define xe_tile_sriov_info(_tile, _fmt, ...) \ + xe_tile_sriov_printk(_tile, info, _fmt, ##__VA_ARGS__) + +#define xe_tile_sriov_dbg(_tile, _fmt, ...) \ + xe_tile_sriov_printk(_tile, dbg, _fmt, ##__VA_ARGS__) + +#define xe_tile_sriov_dbg_verbose(_tile, _fmt, ...) \ + xe_tile_sriov_printk(_tile, dbg_verbose, _fmt, ##__VA_ARGS__) + +#endif diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c index f221dbed16f0..c9bac2cfdd04 100644 --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c @@ -9,7 +9,6 @@ #include "xe_assert.h" #include "xe_ggtt.h" -#include "xe_gt_sriov_vf.h" #include "xe_sriov.h" #include "xe_sriov_printk.h" #include "xe_tile_sriov_vf.h" @@ -40,10 +39,10 @@ static int vf_init_ggtt_balloons(struct xe_tile *tile) * * Return: 0 on success or a negative error code on failure. */ -int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile) +static int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile) { - u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt); - u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt); + u64 ggtt_base = tile->sriov.vf.self_config.ggtt_base; + u64 ggtt_size = tile->sriov.vf.self_config.ggtt_size; struct xe_device *xe = tile_to_xe(tile); u64 wopcm = xe_wopcm_size(xe); u64 start, end; @@ -232,7 +231,7 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile) */ /** - * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range. + * xe_tile_sriov_vf_fixup_ggtt_nodes_locked - Shift GGTT allocations to match assigned range. * @tile: the &xe_tile struct instance * @shift: the shift value * @@ -240,15 +239,112 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile) * within the global space. This range might have changed during migration, * which requires all memory addresses pointing to GGTT to be shifted. */ -void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift) +void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift) { struct xe_ggtt *ggtt = tile->mem.ggtt; - mutex_lock(&ggtt->lock); + lockdep_assert_held(&ggtt->lock); xe_tile_sriov_vf_deballoon_ggtt_locked(tile); xe_ggtt_shift_nodes_locked(ggtt, shift); xe_tile_sriov_vf_balloon_ggtt_locked(tile); +} - mutex_unlock(&ggtt->lock); +/** + * xe_tile_sriov_vf_lmem - VF LMEM configuration. + * @tile: the &xe_tile + * + * This function is for VF use only. + * + * Return: size of the LMEM assigned to VF. + */ +u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile) +{ + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config; + + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + return config->lmem_size; +} + +/** + * xe_tile_sriov_vf_lmem_store - Store VF LMEM configuration + * @tile: the &xe_tile + * @lmem_size: VF LMEM size to store + * + * This function is for VF use only. + */ +void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size) +{ + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config; + + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + config->lmem_size = lmem_size; +} + +/** + * xe_tile_sriov_vf_ggtt - VF GGTT configuration. + * @tile: the &xe_tile + * + * This function is for VF use only. + * + * Return: size of the GGTT assigned to VF. 
+ */ +u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile) +{ + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config; + + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + return config->ggtt_size; +} + +/** + * xe_tile_sriov_vf_ggtt_store - Store VF GGTT configuration + * @tile: the &xe_tile + * @ggtt_size: VF GGTT size to store + * + * This function is for VF use only. + */ +void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size) +{ + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config; + + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + config->ggtt_size = ggtt_size; +} + +/** + * xe_tile_sriov_vf_ggtt_base - VF GGTT base configuration. + * @tile: the &xe_tile + * + * This function is for VF use only. + * + * Return: base of the GGTT assigned to VF. + */ +u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile) +{ + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config; + + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + return config->ggtt_base; +} + +/** + * xe_tile_sriov_vf_ggtt_base_store - Store VF GGTT base configuration + * @tile: the &xe_tile + * @ggtt_base: VF GGTT base to store + * + * This function is for VF use only. + */ +void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_base) +{ + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config; + + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + config->ggtt_base = ggtt_base; } diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h index 93eb043171e8..749f41504883 100644 --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h @@ -11,8 +11,13 @@ struct xe_tile; int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile); -int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile); void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile); -void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift); +void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift); +u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile); +void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size); +u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile); +void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_size); +u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile); +void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size); #endif diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h new file mode 100644 index 000000000000..4807ca51614c --- /dev/null +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_TILE_SRIOV_VF_TYPES_H_ +#define _XE_TILE_SRIOV_VF_TYPES_H_ + +#include <linux/types.h> + +/** + * struct xe_tile_sriov_vf_selfconfig - VF configuration data. + */ +struct xe_tile_sriov_vf_selfconfig { + /** @ggtt_base: assigned base offset of the GGTT region. */ + u64 ggtt_base; + /** @ggtt_size: assigned size of the GGTT region. */ + u64 ggtt_size; + /** @lmem_size: assigned size of the LMEM. 
*/ + u64 lmem_size; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.h b/drivers/gpu/drm/xe/xe_tlb_inval.h index 554634dfd4e2..05614915463a 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval.h +++ b/drivers/gpu/drm/xe/xe_tlb_inval.h @@ -33,7 +33,7 @@ void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval, * xe_tlb_inval_fence_wait() - TLB invalidiation fence wait * @fence: TLB invalidation fence to wait on * - * Wait on a TLB invalidiation fence until it signals, non interruptable + * Wait on a TLB invalidiation fence until it signals, non interruptible */ static inline void xe_tlb_inval_fence_wait(struct xe_tlb_inval_fence *fence) diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.c b/drivers/gpu/drm/xe/xe_tlb_inval_job.c index 492def04a559..1ae0dec2cf31 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval_job.c +++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.c @@ -12,6 +12,7 @@ #include "xe_tlb_inval_job.h" #include "xe_migrate.h" #include "xe_pm.h" +#include "xe_vm.h" /** struct xe_tlb_inval_job - TLB invalidation job */ struct xe_tlb_inval_job { @@ -21,6 +22,8 @@ struct xe_tlb_inval_job { struct xe_tlb_inval *tlb_inval; /** @q: exec queue issuing the invalidate */ struct xe_exec_queue *q; + /** @vm: VM which TLB invalidation is being issued for */ + struct xe_vm *vm; /** @refcount: ref count of this job */ struct kref refcount; /** @@ -32,8 +35,8 @@ struct xe_tlb_inval_job { u64 start; /** @end: End address to invalidate */ u64 end; - /** @asid: Address space ID to invalidate */ - u32 asid; + /** @type: GT type */ + int type; /** @fence_armed: Fence has been armed */ bool fence_armed; }; @@ -46,7 +49,7 @@ static struct dma_fence *xe_tlb_inval_job_run(struct xe_dep_job *dep_job) container_of(job->fence, typeof(*ifence), base); xe_tlb_inval_range(job->tlb_inval, ifence, job->start, - job->end, job->asid); + job->end, job->vm->usm.asid); return job->fence; } @@ -70,9 +73,10 @@ static const struct xe_dep_job_ops dep_job_ops = { * @q: exec queue issuing the invalidate * @tlb_inval: TLB invalidation client * @dep_scheduler: Dependency scheduler for job + * @vm: VM which TLB invalidation is being issued for * @start: Start address to invalidate * @end: End address to invalidate - * @asid: Address space ID to invalidate + * @type: GT type * * Create a TLB invalidation job and initialize internal fields. The caller is * responsible for releasing the creation reference. 
@@ -81,8 +85,8 @@ static const struct xe_dep_job_ops dep_job_ops = { */ struct xe_tlb_inval_job * xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval, - struct xe_dep_scheduler *dep_scheduler, u64 start, - u64 end, u32 asid) + struct xe_dep_scheduler *dep_scheduler, + struct xe_vm *vm, u64 start, u64 end, int type) { struct xe_tlb_inval_job *job; struct drm_sched_entity *entity = @@ -90,19 +94,24 @@ xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval, struct xe_tlb_inval_fence *ifence; int err; + xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || + type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT); + job = kmalloc(sizeof(*job), GFP_KERNEL); if (!job) return ERR_PTR(-ENOMEM); job->q = q; + job->vm = vm; job->tlb_inval = tlb_inval; job->start = start; job->end = end; - job->asid = asid; job->fence_armed = false; job->dep.ops = &dep_job_ops; + job->type = type; kref_init(&job->refcount); xe_exec_queue_get(q); /* Pairs with put in xe_tlb_inval_job_destroy */ + xe_vm_get(vm); /* Pairs with put in xe_tlb_inval_job_destroy */ ifence = kmalloc(sizeof(*ifence), GFP_KERNEL); if (!ifence) { @@ -124,6 +133,7 @@ xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval, err_fence: kfree(ifence); err_job: + xe_vm_put(vm); xe_exec_queue_put(q); kfree(job); @@ -138,6 +148,7 @@ static void xe_tlb_inval_job_destroy(struct kref *ref) container_of(job->fence, typeof(*ifence), base); struct xe_exec_queue *q = job->q; struct xe_device *xe = gt_to_xe(q->gt); + struct xe_vm *vm = job->vm; if (!job->fence_armed) kfree(ifence); @@ -147,6 +158,7 @@ static void xe_tlb_inval_job_destroy(struct kref *ref) drm_sched_job_cleanup(&job->dep.drm); kfree(job); + xe_vm_put(vm); /* Pairs with get from xe_tlb_inval_job_create */ xe_exec_queue_put(q); /* Pairs with get from xe_tlb_inval_job_create */ xe_pm_runtime_put(xe); /* Pairs with get from xe_tlb_inval_job_create */ } @@ -231,6 +243,11 @@ struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job, dma_fence_get(&job->dep.drm.s_fence->finished); drm_sched_entity_push_job(&job->dep.drm); + /* Let the upper layers fish this out */ + xe_exec_queue_tlb_inval_last_fence_set(job->q, job->vm, + &job->dep.drm.s_fence->finished, + job->type); + xe_migrate_job_unlock(m, job->q); /* diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.h b/drivers/gpu/drm/xe/xe_tlb_inval_job.h index e63edcb26b50..4d6df1a6c6ca 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval_job.h +++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.h @@ -11,14 +11,15 @@ struct dma_fence; struct xe_dep_scheduler; struct xe_exec_queue; +struct xe_migrate; struct xe_tlb_inval; struct xe_tlb_inval_job; -struct xe_migrate; +struct xe_vm; struct xe_tlb_inval_job * xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval, struct xe_dep_scheduler *dep_scheduler, - u64 start, u64 end, u32 asid); + struct xe_vm *vm, u64 start, u64 end, int type); int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job); diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h index 314f42fcbcbd..79a97b086cb2 100644 --- a/drivers/gpu/drm/xe/xe_trace.h +++ b/drivers/gpu/drm/xe/xe_trace.h @@ -441,6 +441,29 @@ TRACE_EVENT(xe_eu_stall_data_read, __entry->read_size, __entry->total_size) ); +TRACE_EVENT(xe_exec_queue_reach_max_job_count, + TP_PROTO(struct xe_exec_queue *q, int max_cnt), + TP_ARGS(q, max_cnt), + + TP_STRUCT__entry(__string(dev, __dev_name_eq(q)) + __field(enum xe_engine_class, class) + __field(u32, logical_mask) + 
__field(u16, guc_id) + __field(int, max_cnt) + ), + + TP_fast_assign(__assign_str(dev); + __entry->class = q->class; + __entry->logical_mask = q->logical_mask; + __entry->guc_id = q->guc->id; + __entry->max_cnt = max_cnt; + ), + + TP_printk("dev=%s, job count exceeded the maximum limit (%d) per exec queue. engine_class=0x%x, logical_mask=0x%x, guc_id=%d", + __get_str(dev), __entry->max_cnt, + __entry->class, __entry->logical_mask, __entry->guc_id) +); + #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index dc588255674d..1bddecfb723a 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT /* * Copyright © 2021-2023 Intel Corporation - * Copyright (C) 2021-2002 Red Hat + * Copyright (C) 2021-2022 Red Hat */ #include <drm/drm_managed.h> @@ -24,8 +24,8 @@ #include "xe_sriov.h" #include "xe_ttm_stolen_mgr.h" #include "xe_ttm_vram_mgr.h" -#include "xe_wa.h" #include "xe_vram.h" +#include "xe_wa.h" struct xe_ttm_stolen_mgr { struct xe_ttm_vram_mgr base; @@ -81,7 +81,7 @@ static u32 get_wopcm_size(struct xe_device *xe) return wopcm_size; } -static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) +static u64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) { struct xe_vram_region *tile_vram = xe_device_get_root_tile(xe)->mem.vram; resource_size_t tile_io_start = xe_vram_region_io_start(tile_vram); @@ -105,6 +105,8 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) return 0; stolen_size = tile_size - mgr->stolen_base; + + xe_assert(xe, stolen_size >= wopcm_size); stolen_size -= wopcm_size; /* Verify usage fits in the actual resource available */ diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c index d38b91872da3..3e404eb8d098 100644 --- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT /* * Copyright © 2021-2022 Intel Corporation - * Copyright (C) 2021-2002 Red Hat + * Copyright (C) 2021-2022 Red Hat */ #include "xe_ttm_sys_mgr.h" @@ -85,7 +85,7 @@ static const struct ttm_resource_manager_func xe_ttm_sys_mgr_func = { .debug = xe_ttm_sys_mgr_debug }; -static void ttm_sys_mgr_fini(struct drm_device *drm, void *arg) +static void xe_ttm_sys_mgr_fini(struct drm_device *drm, void *arg) { struct xe_device *xe = (struct xe_device *)arg; struct ttm_resource_manager *man = &xe->mem.sys_mgr; @@ -116,5 +116,5 @@ int xe_ttm_sys_mgr_init(struct xe_device *xe) ttm_resource_manager_init(man, &xe->ttm, gtt_size >> PAGE_SHIFT); ttm_set_driver_manager(&xe->ttm, XE_PL_TT, man); ttm_resource_manager_set_used(man, true); - return drmm_add_action_or_reset(&xe->drm, ttm_sys_mgr_fini, xe); + return drmm_add_action_or_reset(&xe->drm, xe_ttm_sys_mgr_fini, xe); } diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c index 9175b4a2214b..9f70802fce92 100644 --- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT /* * Copyright © 2021-2022 Intel Corporation - * Copyright (C) 2021-2002 Red Hat + * Copyright (C) 2021-2022 Red Hat */ #include <drm/drm_managed.h> @@ -284,7 +284,7 @@ static const struct ttm_resource_manager_func xe_ttm_vram_mgr_func = { .debug = xe_ttm_vram_mgr_debug }; -static void ttm_vram_mgr_fini(struct drm_device *dev, void *arg) 
+static void xe_ttm_vram_mgr_fini(struct drm_device *dev, void *arg) { struct xe_device *xe = to_xe_device(dev); struct xe_ttm_vram_mgr *mgr = arg; @@ -335,7 +335,7 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr, ttm_set_driver_manager(&xe->ttm, mem_type, &mgr->manager); ttm_resource_manager_set_used(&mgr->manager, true); - return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr); + return drmm_add_action_or_reset(&xe->drm, xe_ttm_vram_mgr_fini, mgr); } /** diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h index 1144f9232ebb..a71e14818ec2 100644 --- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h +++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h @@ -10,7 +10,7 @@ #include <drm/ttm/ttm_device.h> /** - * struct xe_ttm_vram_mgr - XE TTM VRAM manager + * struct xe_ttm_vram_mgr - Xe TTM VRAM manager * * Manages placement of TTM resource in VRAM. */ @@ -32,7 +32,7 @@ struct xe_ttm_vram_mgr { }; /** - * struct xe_ttm_vram_mgr_resource - XE TTM VRAM resource + * struct xe_ttm_vram_mgr_resource - Xe TTM VRAM resource */ struct xe_ttm_vram_mgr_resource { /** @base: Base TTM resource */ diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index a524170a04d0..5766fa7742d3 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -8,6 +8,7 @@ #include <kunit/visibility.h> #include <drm/drm_managed.h> +#include <drm/drm_print.h> #include "regs/xe_gt_regs.h" #include "xe_gt_types.h" @@ -40,7 +41,8 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f))) }, { XE_RTP_NAME("Tuning: Compression Overfetch"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED), + FUNC(xe_rtp_match_has_flat_ccs)), XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX), SET(CCCHKNREG1, L3CMPCTRL)) }, @@ -58,12 +60,14 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN)) }, { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED), + FUNC(xe_rtp_match_has_flat_ccs)), XE_RTP_ACTIONS(SET(L3SQCREG2, COMPMEMRD256BOVRFETCHEN)) }, { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"), - XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED), + FUNC(xe_rtp_match_has_flat_ccs)), XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2, COMPMEMRD256BOVRFETCHEN)) }, @@ -214,7 +218,14 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe) xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc); } -void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p) +/** + * xe_tuning_dump() - Dump GT tuning info into a drm printer. + * @gt: the &xe_gt + * @p: the &drm_printer + * + * Return: always 0. 
+ */ +int xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p) { size_t idx; @@ -222,11 +233,15 @@ void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p) for_each_set_bit(idx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings)) drm_printf_indent(p, 1, "%s\n", gt_tunings[idx].name); - drm_printf(p, "\nEngine Tunings\n"); + drm_puts(p, "\n"); + drm_printf(p, "Engine Tunings\n"); for_each_set_bit(idx, gt->tuning_active.engine, ARRAY_SIZE(engine_tunings)) drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name); - drm_printf(p, "\nLRC Tunings\n"); + drm_puts(p, "\n"); + drm_printf(p, "LRC Tunings\n"); for_each_set_bit(idx, gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings)) drm_printf_indent(p, 1, "%s\n", lrc_tunings[idx].name); + + return 0; } diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h index dd0d3ccc9c65..c1cc5927fda7 100644 --- a/drivers/gpu/drm/xe/xe_tuning.h +++ b/drivers/gpu/drm/xe/xe_tuning.h @@ -14,6 +14,6 @@ int xe_tuning_init(struct xe_gt *gt); void xe_tuning_process_gt(struct xe_gt *gt); void xe_tuning_process_engine(struct xe_hw_engine *hwe); void xe_tuning_process_lrc(struct xe_hw_engine *hwe); -void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p); +int xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p); #endif diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h index 77a1dcf8b4ed..2ebe8c9db6ce 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw_types.h +++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h @@ -62,7 +62,7 @@ enum xe_uc_fw_type { }; /** - * struct xe_uc_fw_version - Version for XE micro controller firmware + * struct xe_uc_fw_version - Version for Xe micro controller firmware */ struct xe_uc_fw_version { /** @branch: branch version of the FW (not always available) */ @@ -84,7 +84,7 @@ enum xe_uc_fw_version_types { }; /** - * struct xe_uc_fw - XE micro controller firmware + * struct xe_uc_fw - Xe micro controller firmware */ struct xe_uc_fw { /** @type: type uC firmware */ @@ -112,7 +112,7 @@ struct xe_uc_fw { /** @size: size of uC firmware including css header */ size_t size; - /** @bo: XE BO for uC firmware */ + /** @bo: Xe BO for uC firmware */ struct xe_bo *bo; /** @has_gsc_headers: whether the FW image starts with GSC headers */ diff --git a/drivers/gpu/drm/xe/xe_uc_types.h b/drivers/gpu/drm/xe/xe_uc_types.h index 9924e4484866..1708379dc834 100644 --- a/drivers/gpu/drm/xe/xe_uc_types.h +++ b/drivers/gpu/drm/xe/xe_uc_types.h @@ -12,7 +12,7 @@ #include "xe_wopcm_types.h" /** - * struct xe_uc - XE micro controllers + * struct xe_uc - Xe micro controllers */ struct xe_uc { /** @guc: Graphics micro controller */ diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c index f16e92cd8090..0d9130b1958a 100644 --- a/drivers/gpu/drm/xe/xe_userptr.c +++ b/drivers/gpu/drm/xe/xe_userptr.c @@ -3,6 +3,7 @@ * Copyright © 2025 Intel Corporation */ +#include "xe_svm.h" #include "xe_userptr.h" #include <linux/mm.h> @@ -54,7 +55,8 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) struct xe_device *xe = vm->xe; struct drm_gpusvm_ctx ctx = { .read_only = xe_vma_read_only(vma), - .device_private_page_owner = NULL, + .device_private_page_owner = xe_svm_devm_owner(xe), + .allow_mixed = true, }; lockdep_assert_held(&vm->lock); diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h index b2d09c596714..a30e732c4d51 100644 --- a/drivers/gpu/drm/xe/xe_validation.h +++ b/drivers/gpu/drm/xe/xe_validation.h @@ -108,7 +108,7 @@ struct xe_val_flags { * @request_exclusive: 
Whether to lock exclusively (write mode) the next time * the domain lock is locked. * @exec_flags: The drm_exec flags used for drm_exec (re-)initialization. - * @nr: The drm_exec nr parameter used for drm_exec (re-)initializaiton. + * @nr: The drm_exec nr parameter used for drm_exec (re-)initialization. */ struct xe_validation_ctx { struct drm_exec *exec; @@ -137,7 +137,7 @@ bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret); * @_ret: The current error value possibly holding -ENOMEM * * Use this in way similar to drm_exec_retry_on_contention(). - * If @_ret contains -ENOMEM the tranaction is restarted once in a way that + * If @_ret contains -ENOMEM the transaction is restarted once in a way that * blocks other transactions and allows exhastive eviction. If the transaction * was already restarted once, Just return the -ENOMEM. May also set * _ret to -EINTR if not retrying and waits are interruptible. @@ -180,7 +180,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T) * @_val: The xe_validation_device. * @_exec: The struct drm_exec object * @_flags: Flags for the xe_validation_ctx initialization. - * @_ret: Return in / out parameter. May be set by this macro. Typicall 0 when called. + * @_ret: Return in / out parameter. May be set by this macro. Typically 0 when called. * * This macro is will initiate a drm_exec transaction with additional support for * exhaustive eviction. diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 63c65e3d207b..7cac646bdf1c 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -27,7 +27,6 @@ #include "xe_device.h" #include "xe_drm_client.h" #include "xe_exec_queue.h" -#include "xe_gt_pagefault.h" #include "xe_migrate.h" #include "xe_pat.h" #include "xe_pm.h" @@ -35,6 +34,7 @@ #include "xe_pt.h" #include "xe_pxp.h" #include "xe_res_cursor.h" +#include "xe_sriov_vf.h" #include "xe_svm.h" #include "xe_sync.h" #include "xe_tile.h" @@ -111,12 +111,22 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, static int wait_for_existing_preempt_fences(struct xe_vm *vm) { struct xe_exec_queue *q; + bool vf_migration = IS_SRIOV_VF(vm->xe) && + xe_sriov_vf_migration_supported(vm->xe); + signed long wait_time = vf_migration ? HZ / 5 : MAX_SCHEDULE_TIMEOUT; xe_vm_assert_held(vm); list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { if (q->lr.pfence) { - long timeout = dma_fence_wait(q->lr.pfence, false); + long timeout; + + timeout = dma_fence_wait_timeout(q->lr.pfence, false, + wait_time); + if (!timeout) { + xe_assert(vm->xe, vf_migration); + return -EAGAIN; + } /* Only -ETIME on fence indicates VM needs to be killed */ if (timeout < 0 || q->lr.pfence->error == -ETIME) @@ -466,6 +476,8 @@ static void preempt_rebind_work_func(struct work_struct *w) retry: if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) { up_write(&vm->lock); + /* We don't actually block but don't make progress. */ + xe_pm_might_block_on_suspend(); return; } @@ -539,6 +551,19 @@ out_unlock: out_unlock_outer: if (err == -EAGAIN) { trace_xe_vm_rebind_worker_retry(vm); + + /* + * We can't block in workers on a VF which supports migration + * given this can block the VF post-migration workers from + * getting scheduled. 
+ */ + if (IS_SRIOV_VF(vm->xe) && + xe_sriov_vf_migration_supported(vm->xe)) { + up_write(&vm->lock); + xe_vm_queue_rebind_worker(vm); + return; + } + goto retry; } @@ -729,6 +754,7 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); xe_vma_ops_init(&vops, vm, NULL, NULL, 0); + vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT; for_each_tile(tile, vm->xe, id) { vops.pt_update_ops[id].wait_vm_bookkeep = true; vops.pt_update_ops[tile->id].q = @@ -798,7 +824,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops, * * (re)bind SVM range setting up GPU page tables for the range. * - * Return: dma fence for rebind to signal completion on succees, ERR_PTR on + * Return: dma fence for rebind to signal completion on success, ERR_PTR on * failure */ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm, @@ -819,6 +845,7 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm, xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma)); xe_vma_ops_init(&vops, vm, NULL, NULL, 0); + vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT; for_each_tile(tile, vm->xe, id) { vops.pt_update_ops[id].wait_vm_bookkeep = true; vops.pt_update_ops[tile->id].q = @@ -881,7 +908,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops, * * Unbind SVM range removing the GPU page tables for the range. * - * Return: dma fence for unbind to signal completion on succees, ERR_PTR on + * Return: dma fence for unbind to signal completion on success, ERR_PTR on * failure */ struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm, @@ -1265,7 +1292,7 @@ static u16 pde_pat_index(struct xe_bo *bo) * selection of options. The user PAT index is only for encoding leaf * nodes, where we have use of more bits to do the encoding. The * non-leaf nodes are instead under driver control so the chosen index - * here should be distict from the user PAT index. Also the + * here should be distinct from the user PAT index. Also the * corresponding coherency of the PAT index should be tied to the * allocation type of the page table (or at least we should pick * something which is always safe). 
@@ -1432,7 +1459,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) struct xe_validation_ctx ctx; struct drm_exec exec; struct xe_vm *vm; - int err, number_tiles = 0; + int err; struct xe_tile *tile; u8 id; @@ -1593,13 +1620,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) goto err_close; } vm->q[id] = q; - number_tiles++; } } - if (number_tiles > 1) - vm->composite_fence_ctx = dma_fence_context_alloc(1); - if (xef && xe->info.has_asid) { u32 asid; @@ -1705,8 +1728,13 @@ void xe_vm_close_and_put(struct xe_vm *vm) down_write(&vm->lock); for_each_tile(tile, xe, id) { - if (vm->q[id]) + if (vm->q[id]) { + int i; + xe_exec_queue_last_fence_put(vm->q[id], vm); + for_each_tlb_inval(i) + xe_exec_queue_tlb_inval_last_fence_put(vm->q[id], vm, i); + } } up_write(&vm->lock); @@ -1875,6 +1903,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, struct xe_device *xe = to_xe_device(dev); struct xe_file *xef = to_xe_file(file); struct drm_xe_vm_create *args = data; + struct xe_gt *wa_gt = xe_root_mmio_gt(xe); struct xe_vm *vm; u32 id; int err; @@ -1883,7 +1912,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, if (XE_IOCTL_DBG(xe, args->extensions)) return -EINVAL; - if (XE_GT_WA(xe_root_mmio_gt(xe), 14016763929)) + if (wa_gt && XE_GT_WA(wa_gt, 22014953428)) args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE; if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && @@ -3075,20 +3104,31 @@ static struct dma_fence *ops_execute(struct xe_vm *vm, struct dma_fence *fence = NULL; struct dma_fence **fences = NULL; struct dma_fence_array *cf = NULL; - int number_tiles = 0, current_fence = 0, err; + int number_tiles = 0, current_fence = 0, n_fence = 0, err; u8 id; number_tiles = vm_ops_setup_tile_args(vm, vops); if (number_tiles == 0) return ERR_PTR(-ENODATA); - if (number_tiles > 1) { - fences = kmalloc_array(number_tiles, sizeof(*fences), - GFP_KERNEL); - if (!fences) { - fence = ERR_PTR(-ENOMEM); - goto err_trace; - } + if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) { + for_each_tile(tile, vm->xe, id) + ++n_fence; + } else { + for_each_tile(tile, vm->xe, id) + n_fence += (1 + XE_MAX_GT_PER_TILE); + } + + fences = kmalloc_array(n_fence, sizeof(*fences), GFP_KERNEL); + if (!fences) { + fence = ERR_PTR(-ENOMEM); + goto err_trace; + } + + cf = dma_fence_array_alloc(n_fence); + if (!cf) { + fence = ERR_PTR(-ENOMEM); + goto err_out; } for_each_tile(tile, vm->xe, id) { @@ -3105,30 +3145,34 @@ static struct dma_fence *ops_execute(struct xe_vm *vm, trace_xe_vm_ops_execute(vops); for_each_tile(tile, vm->xe, id) { + struct xe_exec_queue *q = vops->pt_update_ops[tile->id].q; + int i; + + fence = NULL; if (!vops->pt_update_ops[id].num_ops) - continue; + goto collect_fences; fence = xe_pt_update_ops_run(tile, vops); if (IS_ERR(fence)) goto err_out; - if (fences) - fences[current_fence++] = fence; - } +collect_fences: + fences[current_fence++] = fence ?: dma_fence_get_stub(); + if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) + continue; - if (fences) { - cf = dma_fence_array_create(number_tiles, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - if (!cf) { - --vm->composite_fence_seqno; - fence = ERR_PTR(-ENOMEM); - goto err_out; - } - fence = &cf->base; + xe_migrate_job_lock(tile->migrate, q); + for_each_tlb_inval(i) + fences[current_fence++] = + xe_exec_queue_tlb_inval_last_fence_get(q, vm, i); + xe_migrate_job_unlock(tile->migrate, q); } + xe_assert(vm->xe, current_fence == n_fence); + 
dma_fence_array_init(cf, n_fence, fences, dma_fence_context_alloc(1), + 1, false); + fence = &cf->base; + for_each_tile(tile, vm->xe, id) { if (!vops->pt_update_ops[id].num_ops) continue; @@ -3188,7 +3232,6 @@ static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op, static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, struct dma_fence *fence) { - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q); struct xe_user_fence *ufence; struct xe_vma_op *op; int i; @@ -3209,7 +3252,6 @@ static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, if (fence) { for (i = 0; i < vops->num_syncs; i++) xe_sync_entry_signal(vops->syncs + i, fence); - xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); } } @@ -3369,8 +3411,10 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, op == DRM_XE_VM_BIND_OP_PREFETCH) || XE_IOCTL_DBG(xe, prefetch_region && op != DRM_XE_VM_BIND_OP_PREFETCH) || - XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC && - !(BIT(prefetch_region) & xe->info.mem_region_mask))) || + XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC && + /* Guard against undefined shift in BIT(prefetch_region) */ + (prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) || + !(BIT(prefetch_region) & xe->info.mem_region_mask)))) || XE_IOCTL_DBG(xe, obj && op == DRM_XE_VM_BIND_OP_UNMAP) || XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) && @@ -3403,19 +3447,19 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, struct xe_sync_entry *syncs, int num_syncs) { - struct dma_fence *fence; + struct dma_fence *fence = NULL; int i, err = 0; - fence = xe_sync_in_fence_get(syncs, num_syncs, - to_wait_exec_queue(vm, q), vm); - if (IS_ERR(fence)) - return PTR_ERR(fence); + if (num_syncs) { + fence = xe_sync_in_fence_get(syncs, num_syncs, + to_wait_exec_queue(vm, q), vm); + if (IS_ERR(fence)) + return PTR_ERR(fence); - for (i = 0; i < num_syncs; i++) - xe_sync_entry_signal(&syncs[i], fence); + for (i = 0; i < num_syncs; i++) + xe_sync_entry_signal(&syncs[i], fence); + } - xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, - fence); dma_fence_put(fence); return err; @@ -3606,8 +3650,12 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) syncs_user = u64_to_user_ptr(args->syncs); for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { + struct xe_exec_queue *__q = q ?: vm->q[0]; + err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs], &syncs_user[num_syncs], + __q->ufence_syncobj, + ++__q->ufence_timeline_value, (xe_vm_in_lr_mode(vm) ? SYNC_PARSE_FLAG_LR_MODE : 0) | (!args->num_binds ? @@ -4145,7 +4193,7 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap) /** * xe_vma_need_vram_for_atomic - Check if VMA needs VRAM migration for atomic operations - * @xe: Pointer to the XE device structure + * @xe: Pointer to the Xe device structure * @vma: Pointer to the virtual memory area (VMA) structure * @is_atomic: In pagefault path and atomic operation * @@ -4292,7 +4340,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm, xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL); } else if (__op->op == DRM_GPUVA_OP_MAP) { vma = op->map.vma; - /* In case of madvise call, MAP will always be follwed by REMAP. + /* In case of madvise call, MAP will always be followed by REMAP. * Therefore temp_attr will always have sane values, making it safe to * copy them to new vma. 
*/ diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h index 1030ce214032..02e5288373c9 100644 --- a/drivers/gpu/drm/xe/xe_vm_doc.h +++ b/drivers/gpu/drm/xe/xe_vm_doc.h @@ -7,7 +7,7 @@ #define _XE_VM_DOC_H_ /** - * DOC: XE VM (user address space) + * DOC: Xe VM (user address space) * * VM creation * =========== @@ -202,13 +202,13 @@ * User pointers are user allocated memory (malloc'd, mmap'd, etc..) for which the * user wants to create a GPU mapping. Typically in other DRM drivers a dummy BO * was created and then a binding was created. We bypass creating a dummy BO in - * XE and simply create a binding directly from the userptr. + * Xe and simply create a binding directly from the userptr. * * Invalidation * ------------ * * Since this a core kernel managed memory the kernel can move this memory - * whenever it wants. We register an invalidation MMU notifier to alert XE when + * whenever it wants. We register an invalidation MMU notifier to alert Xe when * a user pointer is about to move. The invalidation notifier needs to block * until all pending users (jobs or compute mode engines) of the userptr are * idle to ensure no faults. This done by waiting on all of VM's dma-resv slots. @@ -419,7 +419,7 @@ * ======= * * VM locking protects all of the core data paths (bind operations, execs, - * evictions, and compute mode rebind worker) in XE. + * evictions, and compute mode rebind worker) in Xe. * * Locks * ----- diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index d6e2a0fdd4b3..ccd6cc090309 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -52,7 +52,7 @@ struct xe_vm_pgtable_update_op; * struct xe_vma_mem_attr - memory attributes associated with vma */ struct xe_vma_mem_attr { - /** @preferred_loc: perferred memory_location */ + /** @preferred_loc: preferred memory_location */ struct { /** @preferred_loc.migration_policy: Pages migration policy */ u32 migration_policy; @@ -221,11 +221,6 @@ struct xe_vm { #define XE_VM_FLAG_GSC BIT(8) unsigned long flags; - /** @composite_fence_ctx: context composite fence */ - u64 composite_fence_ctx; - /** @composite_fence_seqno: seqno for composite fence */ - u32 composite_fence_seqno; - /** * @lock: outer most lock, protects objects of anything attached to this * VM @@ -338,7 +333,7 @@ struct xe_vm { u64 tlb_flush_seqno; /** @batch_invalidate_tlb: Always invalidate TLB before batch start */ bool batch_invalidate_tlb; - /** @xef: XE file handle for tracking this VM's drm client */ + /** @xef: Xe file handle for tracking this VM's drm client */ struct xe_file *xef; }; @@ -471,6 +466,7 @@ struct xe_vma_ops { #define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0) #define XE_VMA_OPS_FLAG_MADVISE BIT(1) #define XE_VMA_OPS_ARRAY_OF_BINDS BIT(2) +#define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT BIT(3) u32 flags; #ifdef TEST_VM_OPS_ERROR /** @inject_error: inject error to test error handling */ diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index 652df7a5f4f6..d50baefcd124 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -13,50 +13,25 @@ #include "regs/xe_gt_regs.h" #include "regs/xe_regs.h" #include "xe_assert.h" +#include "xe_bo.h" #include "xe_device.h" #include "xe_force_wake.h" #include "xe_gt_mcr.h" -#include "xe_gt_sriov_vf.h" #include "xe_mmio.h" #include "xe_module.h" #include "xe_sriov.h" +#include "xe_tile_sriov_vf.h" #include "xe_ttm_vram_mgr.h" #include "xe_vram.h" #include "xe_vram_types.h" -#define BAR_SIZE_SHIFT 20 
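The xe_vm.c and xe_vm_types.h hunks above drop the per-VM composite_fence_ctx/composite_fence_seqno pair: ops_execute() now pre-sizes a dma_fence_array for every tile (plus the per-GT TLB-invalidation fences unless XE_VMA_OPS_FLAG_SKIP_TLB_WAIT is set), pads empty slots with the signalled stub fence, and binds the array to a one-off fence context. A simplified sketch of that aggregation pattern, using only the generic dma-fence-array API; the helper name and the flat src[] array are hypothetical, and the real code fills the slots from xe_pt_update_ops_run() and xe_exec_queue_tlb_inval_last_fence_get() instead:

/*
 * Hypothetical helper: collapse up to @n fence references (NULL entries
 * allowed) into a single dma_fence_array. Ownership of the non-NULL
 * references in @src transfers to the array on success.
 */
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/err.h>
#include <linux/slab.h>

static struct dma_fence *aggregate_fences(struct dma_fence **src, int n)
{
	struct dma_fence_array *cf;
	struct dma_fence **fences;
	int i;

	fences = kmalloc_array(n, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	cf = dma_fence_array_alloc(n);
	if (!cf) {
		kfree(fences);
		return ERR_PTR(-ENOMEM);
	}

	/* Missing fences are replaced by the already-signalled stub. */
	for (i = 0; i < n; i++)
		fences[i] = src[i] ?: dma_fence_get_stub();

	/*
	 * The array takes ownership of @fences. A freshly allocated,
	 * single-use context stands in for the per-VM
	 * composite_fence_ctx/seqno pair that the patch removes.
	 */
	dma_fence_array_init(cf, n, fences, dma_fence_context_alloc(1), 1, false);

	return &cf->base;
}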
- -/* - * Release all the BARs that could influence/block LMEMBAR resizing, i.e. - * assigned IORESOURCE_MEM_64 BARs - */ -static void release_bars(struct pci_dev *pdev) -{ - struct resource *res; - int i; - - pci_dev_for_each_resource(pdev, res, i) { - /* Resource already un-assigned, do not reset it */ - if (!res->parent) - continue; - - /* No need to release unrelated BARs */ - if (!(res->flags & IORESOURCE_MEM_64)) - continue; - - pci_release_resource(pdev, i); - } -} - static void resize_bar(struct xe_device *xe, int resno, resource_size_t size) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); int bar_size = pci_rebar_bytes_to_size(size); int ret; - release_bars(pdev); - - ret = pci_resize_resource(pdev, resno, bar_size); + ret = pci_resize_resource(pdev, resno, bar_size, 0); if (ret) { drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support in your BIOS\n", resno, 1 << bar_size, ERR_PTR(ret)); @@ -78,41 +53,37 @@ void xe_vram_resize_bar(struct xe_device *xe) resource_size_t current_size; resource_size_t rebar_size; struct resource *root_res; - u32 bar_size_mask; + int max_size, i; u32 pci_cmd; - int i; /* gather some relevant info */ current_size = pci_resource_len(pdev, LMEM_BAR); - bar_size_mask = pci_rebar_get_possible_sizes(pdev, LMEM_BAR); - - if (!bar_size_mask) - return; if (force_vram_bar_size < 0) return; /* set to a specific size? */ if (force_vram_bar_size) { - u32 bar_size_bit; - - rebar_size = force_vram_bar_size * (resource_size_t)SZ_1M; + rebar_size = pci_rebar_bytes_to_size(force_vram_bar_size * + (resource_size_t)SZ_1M); - bar_size_bit = bar_size_mask & BIT(pci_rebar_bytes_to_size(rebar_size)); - - if (!bar_size_bit) { + if (!pci_rebar_size_supported(pdev, LMEM_BAR, rebar_size)) { drm_info(&xe->drm, - "Requested size: %lluMiB is not supported by rebar sizes: 0x%x. Leaving default: %lluMiB\n", - (u64)rebar_size >> 20, bar_size_mask, (u64)current_size >> 20); + "Requested size: %lluMiB is not supported by rebar sizes: 0x%llx. 
Leaving default: %lluMiB\n", + (u64)pci_rebar_size_to_bytes(rebar_size) >> 20, + pci_rebar_get_possible_sizes(pdev, LMEM_BAR), + (u64)current_size >> 20); return; } - rebar_size = 1ULL << (__fls(bar_size_bit) + BAR_SIZE_SHIFT); - + rebar_size = pci_rebar_size_to_bytes(rebar_size); if (rebar_size == current_size) return; } else { - rebar_size = 1ULL << (__fls(bar_size_mask) + BAR_SIZE_SHIFT); + max_size = pci_rebar_get_max_size(pdev, LMEM_BAR); + if (max_size < 0) + return; + rebar_size = pci_rebar_size_to_bytes(max_size); /* only resize if larger than current */ if (rebar_size <= current_size) @@ -182,12 +153,17 @@ static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region * return 0; } -static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size) +static int get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size, u64 *poffset) { struct xe_device *xe = gt_to_xe(gt); + unsigned int fw_ref; u64 offset; u32 reg; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; + if (GRAPHICS_VER(xe) >= 20) { u64 ccs_size = tile_size / 512; u64 offset_hi, offset_lo; @@ -217,7 +193,10 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size) offset = (u64)REG_FIELD_GET(XEHP_FLAT_CCS_PTR, reg) * SZ_64K; } - return offset; + xe_force_wake_put(gt_to_fw(gt), fw_ref); + *poffset = offset; + + return 0; } /* @@ -244,7 +223,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, { struct xe_device *xe = tile_to_xe(tile); struct xe_gt *gt = tile->primary_gt; - unsigned int fw_ref; u64 offset; u32 reg; @@ -255,32 +233,31 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, offset = 0; for_each_tile(t, xe, id) for_each_if(t->id < tile->id) - offset += xe_gt_sriov_vf_lmem(t->primary_gt); + offset += xe_tile_sriov_vf_lmem(t); - *tile_size = xe_gt_sriov_vf_lmem(gt); + *tile_size = xe_tile_sriov_vf_lmem(tile); *vram_size = *tile_size; *tile_offset = offset; return 0; } - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) - return -ETIMEDOUT; - /* actual size */ if (unlikely(xe->info.platform == XE_DG1)) { *tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), LMEM_BAR); *tile_offset = 0; } else { - reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id)); + reg = xe_mmio_read32(&tile->mmio, SG_TILE_ADDR_RANGE(tile->id)); *tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G; *tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G; } /* minus device usage */ if (xe->info.has_flat_ccs) { - offset = get_flat_ccs_offset(gt, *tile_size); + int ret = get_flat_ccs_offset(gt, *tile_size, &offset); + + if (ret) + return ret; } else { offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE); } @@ -288,8 +265,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, /* remove the tile offset so we have just the available size */ *vram_size = offset - *tile_offset; - xe_force_wake_put(gt_to_fw(gt), fw_ref); - return 0; } @@ -301,8 +276,11 @@ static void vram_fini(void *arg) xe->mem.vram->mapping = NULL; - for_each_tile(tile, xe, id) + for_each_tile(tile, xe, id) { tile->mem.vram->mapping = NULL; + if (tile->mem.kernel_vram) + tile->mem.kernel_vram->mapping = NULL; + } } struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index cd03891654a1..3764abca3d4f 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -679,12 +679,14 @@ static const struct 
xe_rtp_entry_sr engine_was[] = { }, { XE_RTP_NAME("14023061436"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), + FUNC(xe_rtp_match_first_render_or_compute), OR, + GRAPHICS_VERSION_RANGE(3003, 3005), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE)) }, { XE_RTP_NAME("13012615864"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), OR, - GRAPHICS_VERSION(3003), + GRAPHICS_VERSION_RANGE(3003, 3005), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS)) }, @@ -695,7 +697,7 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, { XE_RTP_NAME("14021402888"), - XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3003, 3005), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) }, { XE_RTP_NAME("18041344222"), @@ -913,9 +915,18 @@ static const struct xe_rtp_entry_sr lrc_was[] = { DIS_AUTOSTRIP)) }, { XE_RTP_NAME("22021007897"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3003), ENGINE_CLASS(RENDER)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE)) }, + { XE_RTP_NAME("14024681466"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1, FAST_CLEAR_VALIGN_FIX)) + }, + { XE_RTP_NAME("15016589081"), + XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0), + ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) + }, }; static __maybe_unused const struct xe_rtp_entry oob_was[] = { @@ -1086,7 +1097,14 @@ void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p) drm_printf_indent(p, 1, "%s\n", device_oob_was[idx].name); } -void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p) +/** + * xe_wa_gt_dump() - Dump GT workarounds into a drm printer. + * @gt: the &xe_gt + * @p: the &drm_printer + * + * Return: always 0. 
+ */ +int xe_wa_gt_dump(struct xe_gt *gt, struct drm_printer *p) { size_t idx; @@ -1094,18 +1112,22 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p) for_each_set_bit(idx, gt->wa_active.gt, ARRAY_SIZE(gt_was)) drm_printf_indent(p, 1, "%s\n", gt_was[idx].name); - drm_printf(p, "\nEngine Workarounds\n"); + drm_puts(p, "\n"); + drm_printf(p, "Engine Workarounds\n"); for_each_set_bit(idx, gt->wa_active.engine, ARRAY_SIZE(engine_was)) drm_printf_indent(p, 1, "%s\n", engine_was[idx].name); - drm_printf(p, "\nLRC Workarounds\n"); + drm_puts(p, "\n"); + drm_printf(p, "LRC Workarounds\n"); for_each_set_bit(idx, gt->wa_active.lrc, ARRAY_SIZE(lrc_was)) drm_printf_indent(p, 1, "%s\n", lrc_was[idx].name); - drm_printf(p, "\nOOB Workarounds\n"); + drm_puts(p, "\n"); + drm_printf(p, "OOB Workarounds\n"); for_each_set_bit(idx, gt->wa_active.oob, ARRAY_SIZE(oob_was)) if (oob_was[idx].name) drm_printf_indent(p, 1, "%s\n", oob_was[idx].name); + return 0; } /* @@ -1127,6 +1149,6 @@ void xe_wa_apply_tile_workarounds(struct xe_tile *tile) if (IS_SRIOV_VF(tile->xe)) return; - if (XE_GT_WA(tile->primary_gt, 22010954014)) + if (XE_DEVICE_WA(tile->xe, 22010954014)) xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS); } diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h index 6a869b2de643..8fd6a5af0910 100644 --- a/drivers/gpu/drm/xe/xe_wa.h +++ b/drivers/gpu/drm/xe/xe_wa.h @@ -22,7 +22,7 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe); void xe_wa_process_lrc(struct xe_hw_engine *hwe); void xe_wa_apply_tile_workarounds(struct xe_tile *tile); void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p); -void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p); +int xe_wa_gt_dump(struct xe_gt *gt, struct drm_printer *p); /** * XE_GT_WA - Out-of-band GT workarounds, to be queried and called as needed. 
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index f3a6d5d239ce..fb38eb3d6e9a 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -11,10 +11,9 @@ 18020744125 PLATFORM(PVC) 1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0) 1409600907 GRAPHICS_VERSION_RANGE(1200, 1250) -14016763929 SUBPLATFORM(DG2, G10) +22014953428 SUBPLATFORM(DG2, G10) SUBPLATFORM(DG2, G12) 16017236439 PLATFORM(PVC) -22010954014 PLATFORM(DG2) 14019821291 MEDIA_VERSION_RANGE(1300, 2000) 14015076503 MEDIA_VERSION(1300) 16020292621 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0) @@ -34,18 +33,18 @@ 13011645652 GRAPHICS_VERSION(2004) GRAPHICS_VERSION_RANGE(3000, 3001) GRAPHICS_VERSION(3003) + GRAPHICS_VERSION_RANGE(3004, 3005) 14022293748 GRAPHICS_VERSION_RANGE(2001, 2002) GRAPHICS_VERSION(2004) - GRAPHICS_VERSION_RANGE(3000, 3001) - GRAPHICS_VERSION(3003) + GRAPHICS_VERSION_RANGE(3000, 3005) 22019794406 GRAPHICS_VERSION_RANGE(2001, 2002) GRAPHICS_VERSION(2004) GRAPHICS_VERSION_RANGE(3000, 3001) GRAPHICS_VERSION(3003) + GRAPHICS_VERSION_RANGE(3004, 3005) 22019338487 MEDIA_VERSION(2000) GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf) MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf) -22019338487_display PLATFORM(LUNARLAKE) 16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf) 14019789679 GRAPHICS_VERSION(1255) GRAPHICS_VERSION_RANGE(1270, 2004) @@ -63,11 +62,11 @@ 16023105232 GRAPHICS_VERSION_RANGE(2001, 3001) MEDIA_VERSION_RANGE(1301, 3000) MEDIA_VERSION(3002) - GRAPHICS_VERSION(3003) + GRAPHICS_VERSION_RANGE(3003, 3005) 16026508708 GRAPHICS_VERSION_RANGE(1200, 3001) MEDIA_VERSION_RANGE(1300, 3000) MEDIA_VERSION(3002) - GRAPHICS_VERSION(3003) + GRAPHICS_VERSION_RANGE(3003, 3005) 14020001231 GRAPHICS_VERSION_RANGE(2001,2004), FUNC(xe_rtp_match_psmi_enabled) MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled) MEDIA_VERSION(3000), FUNC(xe_rtp_match_psmi_enabled) @@ -75,9 +74,5 @@ 16023683509 MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled) MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_psmi_enabled) -# SoC workaround - currently applies to all platforms with the following -# primary GT GMDID -14022085890 GRAPHICS_VERSION(2001) - 15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER) 16026007364 MEDIA_VERSION(3000) diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 1bda7ef606cc..4fa45dbe1dcb 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -18,6 +18,7 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> +#include <drm/drm_print.h> #include <xen/platform_pci.h> #include <xen/xen.h> diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 63112ed975c4..386ae7441093 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -15,6 +15,7 @@ #include <drm/drm_gem.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <xen/balloon.h> diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index 806ec66ee7f7..48772b5fe71c 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -16,6 +16,7 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> 
#include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c index 2bee0a2275ed..02f3a7d78cf8 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_kms.c +++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c @@ -19,6 +19,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> +#include <drm/drm_dumb_buffers.h> #include <drm/drm_encoder.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> @@ -363,10 +364,12 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm); - unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + int ret; /* Enforce the alignment constraints of the DMA engine. */ - args->pitch = ALIGN(pitch, dpsub->dma_align); + ret = drm_mode_size_dumb(drm, args, dpsub->dma_align, 0); + if (ret) + return ret; return drm_gem_dma_dumb_create_internal(file_priv, drm, args); }
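The zynqmp_kms.c hunk above replaces the driver's open-coded pitch computation with drm_mode_size_dumb(), passing the DMA engine's alignment as the pitch constraint. As a hedged illustration of the arithmetic such a helper has to perform for a dumb buffer (the function below is hypothetical and omits the overflow checks a real helper would need):

/*
 * Illustrative dumb-buffer sizing: bytes per row rounded up from bits,
 * aligned to the hardware constraint, times the number of rows.
 */
#include <linux/align.h>
#include <linux/math.h>
#include <linux/types.h>

static u64 dumb_buffer_size(u32 width, u32 height, u32 bpp, u32 pitch_align)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);	/* bytes per row */

	if (pitch_align)
		pitch = ALIGN(pitch, pitch_align);	/* e.g. dpsub->dma_align */

	return (u64)pitch * height;
}

With width = 1920, bpp = 32 and a 128-byte alignment, this yields a pitch of 7680 bytes (already aligned) and a 1080-line buffer of 8294400 bytes, matching what the old DIV_ROUND_UP/ALIGN pair in the removed lines computed.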