|  |  |  |
|---|---|---|
| author | Alex Deucher <alexander.deucher@amd.com> | 2024-11-15 17:44:01 -0500 |
| committer | Alex Deucher <alexander.deucher@amd.com> | 2025-02-27 15:52:29 -0500 |
| commit | f98675638f0a818a2eb802103b1e140b091358c4 (patch) | |
| tree | a77092513d9b29f57306f5515f2f2188f65995f7 /drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | |
| parent | cb107271687d569c3b706b974bcae6b9fdcfe9d0 (diff) | |
drm/amdgpu/vcn: switch vcn helpers to be instance based
Pass the instance to the helpers.
Reviewed-by: Boyuan Zhang <Boyuan.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
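
As a rough illustration of the calling convention this change introduces, the sketch below models the per-instance loop with an early return on the first error. It is a stand-alone model, not kernel code: `struct amdgpu_device_model` and `vcn_early_init_model()` are simplified stand-ins for the real amdgpu structures and helpers; only the loop-and-return shape mirrors the diff that follows.

```c
/*
 * Stand-alone sketch of the call-site pattern introduced by this patch.
 * The struct and helper are simplified stand-ins, not the real amdgpu
 * definitions; only the per-instance loop shape mirrors the diff below.
 */
#include <stdio.h>

struct amdgpu_device_model {
	int num_vcn_inst;	/* number of VCN instances on the device */
};

/* Instance-based helper: the instance index is now passed explicitly. */
static int vcn_early_init_model(struct amdgpu_device_model *adev, int inst)
{
	printf("early init for VCN instance %d\n", inst);
	return 0;		/* 0 on success, negative errno on failure */
}

int main(void)
{
	struct amdgpu_device_model adev = { .num_vcn_inst = 4 };
	int i, r;

	/* Call sites loop over the instances and bail out on the first
	 * failure, as vcn_v4_0_3_early_init() does in the diff below.
	 */
	for (i = 0; i < adev.num_vcn_inst; ++i) {
		r = vcn_early_init_model(&adev, i);
		if (r)
			return r;
	}

	return 0;
}
```

The diff applies this same loop-and-return shape to the early_init, sw_init, sw_fini, suspend, and resume paths of vcn_v4_0_3.c.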
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 70 |
1 file changed, 46 insertions, 24 deletions
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index fbe599c539c8..a0b848ad097b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -114,7 +114,7 @@ static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
 static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
-	int i;
+	int i, r;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
 		/* re-use enc ring as unified ring */
@@ -124,7 +124,13 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
 	vcn_v4_0_3_set_irq_funcs(adev);
 	vcn_v4_0_3_set_ras_funcs(adev);
 
-	return amdgpu_vcn_early_init(adev);
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		r = amdgpu_vcn_early_init(adev, i);
+		if (r)
+			return r;
+	}
+
+	return 0;
 }
 
 static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
@@ -156,16 +162,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
 	uint32_t *ptr;
 
-	r = amdgpu_vcn_sw_init(adev);
-	if (r)
-		return r;
-
-	amdgpu_vcn_setup_ucode(adev);
-
-	r = amdgpu_vcn_resume(adev);
-	if (r)
-		return r;
-
 	/* VCN DEC TRAP */
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
@@ -173,6 +169,17 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
 		return r;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+
+		r = amdgpu_vcn_sw_init(adev, i);
+		if (r)
+			return r;
+
+		amdgpu_vcn_setup_ucode(adev, i);
+
+		r = amdgpu_vcn_resume(adev, i);
+		if (r)
+			return r;
+
 		vcn_inst = GET_INST(VCN, i);
 
 		ring = &adev->vcn.inst[i].ring_enc[0];
@@ -261,16 +268,23 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_free_mm_table(adev);
 
-	r = amdgpu_vcn_suspend(adev);
-	if (r)
-		return r;
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		r = amdgpu_vcn_suspend(adev, i);
+		if (r)
+			return r;
+	}
 
 	amdgpu_vcn_sysfs_reset_mask_fini(adev);
-	r = amdgpu_vcn_sw_fini(adev);
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		r = amdgpu_vcn_sw_fini(adev, i);
+		if (r)
+			return r;
+	}
 
 	kfree(adev->vcn.ip_dump);
 
-	return r;
+	return 0;
 }
 
 /**
@@ -375,15 +389,20 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
  */
 static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
 {
-	int r;
+	struct amdgpu_device *adev = ip_block->adev;
+	int r, i;
 
 	r = vcn_v4_0_3_hw_fini(ip_block);
 	if (r)
 		return r;
 
-	r = amdgpu_vcn_suspend(ip_block->adev);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		r = amdgpu_vcn_suspend(adev, i);
+		if (r)
+			return r;
+	}
 
-	return r;
+	return 0;
 }
 
 /**
@@ -395,11 +414,14 @@ static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
  */
 static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
 {
-	int r;
+	struct amdgpu_device *adev = ip_block->adev;
+	int r, i;
 
-	r = amdgpu_vcn_resume(ip_block->adev);
-	if (r)
-		return r;
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		r = amdgpu_vcn_resume(ip_block->adev, i);
+		if (r)
+			return r;
+	}
 
 	r = vcn_v4_0_3_hw_init(ip_block);
```