Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c  31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c  96
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c  8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c  48
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c  66
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c  48
35 files changed, 413 insertions, 116 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b6879d97c9c9..1af2fa1591fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -180,6 +180,7 @@ extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
+extern int amdgpu_backlight;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
@@ -1065,7 +1066,7 @@ static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
return &adev->ddev;
}
-static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 36a741d63ddc..2e9b16fb3fcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -903,7 +903,7 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
*/
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
{
-#if defined(CONFIG_AMD_PMC)
+#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
if (adev->flags & AMD_IS_APU)
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 3107b9575929..5af464933976 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -40,13 +40,13 @@ static atomic_t fence_seq = ATOMIC_INIT(0);
* All the BOs in a process share an eviction fence. When process X wants
* to map VRAM memory but TTM can't find enough space, TTM will attempt to
* evict BOs from its LRU list. TTM checks if the BO is valuable to evict
- * by calling ttm_bo_driver->eviction_valuable().
+ * by calling ttm_device_funcs->eviction_valuable().
*
- * ttm_bo_driver->eviction_valuable() - will return false if the BO belongs
+ * ttm_device_funcs->eviction_valuable() - will return false if the BO belongs
* to process X. Otherwise, it will return true to indicate BO can be
* evicted by TTM.
*
- * If ttm_bo_driver->eviction_valuable returns true, then TTM will continue
+ * If ttm_device_funcs->eviction_valuable returns true, then TTM will continue
* the eviction process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
* --> amdgpu_copy_buffer(). This sets up a job in the GPU scheduler.
*
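For illustration, a minimal sketch of the shape such an eviction_valuable() callback takes; the helper bo_belongs_to_kfd_process() is hypothetical, standing in for amdgpu's real eviction-fence check:

    /* Sketch only: bo_belongs_to_kfd_process() is a made-up helper. */
    static bool example_eviction_valuable(struct ttm_buffer_object *bo,
                                          const struct ttm_place *place)
    {
            /* Refuse to evict BOs owned by the allocating KFD process. */
            if (bo_belongs_to_kfd_process(bo))
                    return false;

            /* Otherwise defer to TTM's default policy. */
            return ttm_bo_eviction_valuable(bo, place);
    }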
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ac0a432a9bf7..9f6b299cbf74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -119,6 +119,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
*/
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
+static size_t amdgpu_amdkfd_acc_size(uint64_t size)
+{
+ size >>= PAGE_SHIFT;
+ size *= sizeof(dma_addr_t) + sizeof(void *);
+
+ return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
+ __roundup_pow_of_two(sizeof(struct ttm_tt)) +
+ PAGE_ALIGN(size);
+}
+
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 domain, bool sg)
{
@@ -127,8 +137,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
int ret = 0;
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
+ acc_size = amdgpu_amdkfd_acc_size(size);
vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
@@ -175,8 +184,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
{
size_t acc_size;
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
+ acc_size = amdgpu_amdkfd_acc_size(size);
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
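A worked example of the new accounting helper, assuming 4 KiB pages and a 64-bit kernel where sizeof(dma_addr_t) == sizeof(void *) == 8: a 2 MiB BO spans 512 pages, so

    acc_size = __roundup_pow_of_two(sizeof(struct amdgpu_bo))  /* BO struct */
             + __roundup_pow_of_two(sizeof(struct ttm_tt))     /* TT struct */
             + PAGE_ALIGN(512 * 16);                           /* 8 KiB of page
                                                                  and DMA-address
                                                                  arrays */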
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4575192d9b08..b26e2fd1c538 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -781,6 +781,10 @@ uint amdgpu_dm_abm_level;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
+int amdgpu_backlight = -1;
+MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
+module_param_named(backlight, amdgpu_backlight, bint, 0444);
+
/**
* DOC: tmz (int)
* Trusted Memory Zone (TMZ) is a method to protect data being written
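The new parameter is registered with the bint type, so it accepts the usual boolean spellings as well as -1 for auto-detection. As a usage example, forcing PWM backlight control would look like amdgpu.backlight=0 on the kernel command line, or modprobe amdgpu backlight=0 when the driver is built as a module; leaving it unset keeps the auto default.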
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 51cd49c6f38f..24010cacf7d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
- ttm_bo_type_kernel, NULL, &gobj);
+ ttm_bo_type_device, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d56f4023ebb3..8e0a5650d383 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -487,7 +487,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
- timeout, ring->name);
+ timeout, NULL, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 0db933026722..fde2d899b2c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- struct page *dummy_page = ttm_bo_glob.dummy_read_page;
+ struct page *dummy_page = ttm_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index ff48101bab55..759b34799221 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
@@ -41,7 +41,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
- return;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -53,10 +53,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
} else {
drm_sched_suspend_timeout(&ring->sched);
if (amdgpu_sriov_vf(adev))
adev->virt.tdr_debug = true;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
}
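For context, the enum that the timedout callback now returns is defined in include/drm/gpu_scheduler.h roughly as:

    enum drm_gpu_sched_stat {
            DRM_GPU_SCHED_STAT_NONE,    /* reserved */
            DRM_GPU_SCHED_STAT_NOMINAL, /* scheduler still operational */
            DRM_GPU_SCHED_STAT_ENODEV,  /* device was unplugged */
    };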
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4b29b8205442..984dcf5a475e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -523,7 +523,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
- size_t acc_size;
int r;
/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
@@ -546,9 +545,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
-
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@@ -577,8 +573,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
- &bo->placement, page_align, &ctx, acc_size,
- NULL, bp->resv, &amdgpu_bo_destroy);
+ &bo->placement, page_align, &ctx, NULL,
+ bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9fd2157b133a..a785acc09f20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,10 +61,10 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
@@ -646,7 +646,7 @@ out:
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct drm_mm_node *mm_node = mem->mm_node;
@@ -893,7 +893,7 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
*
* Called by amdgpu_ttm_backend_bind()
**/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -931,7 +931,7 @@ release_sg:
/*
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1015,7 +1015,7 @@ gart_bind_fail:
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -1155,7 +1155,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1180,7 +1180,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
gtt->bound = false;
}
-static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1234,7 +1234,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
-static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@@ -1278,7 +1278,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1603,7 +1603,7 @@ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
amdgpu_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver amdgpu_bo_driver = {
+static struct ttm_device_funcs amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
@@ -1785,7 +1785,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
/* No other users of the address space, so set it to 0 */
- r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
+ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
adev->need_swiotlb,
@@ -1926,7 +1926,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
- ttm_bo_device_release(&adev->mman.bdev);
+ ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -2002,7 +2002,7 @@ unlock:
return ret;
}
-static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
.fault = amdgpu_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index d2987536d7cd..7189f8370108 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -60,7 +60,7 @@ struct amdgpu_gtt_mgr {
};
struct amdgpu_mman {
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
bool initialized;
void __iomem *aper_base_kaddr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ad91c0c3c423..9d19078246c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
&bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
vm->bulk_moveable = true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 7944781e1086..ea825b4f8ee8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1862,7 +1862,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1981,8 +1980,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 1b6ff0470011..a360a6dec198 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1904,7 +1904,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2023,8 +2022,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 83a88385b762..ef124ac853b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1820,7 +1820,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1929,8 +1928,8 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 224b30214427..c98650183828 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1791,7 +1791,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1902,8 +1901,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
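The %p4cc specifier used in these four hunks is the printk extension for FOURCC pixel-format codes; it takes a pointer to the u32 value rather than the value itself. A sketch of what it produces:

    u32 fourcc = DRM_FORMAT_XRGB8888;

    DRM_ERROR("Unsupported screen format %p4cc\n", &fourcc);
    /* -> "Unsupported screen format XR24 little-endian (0x34325258)" */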
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3e1fd1e7d09f..55e39b462a5e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2267,6 +2267,11 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->ext_caps->bits.hdr_aux_backlight_control == 1)
caps->aux_support = true;
+ if (amdgpu_backlight == 0)
+ caps->aux_support = false;
+ else if (amdgpu_backlight == 1)
+ caps->aux_support = true;
+
/* From the specification (CTA-861-G), for calculating the maximum
* luminance we need to use:
* Luminance = 50*2**(CV/32)
@@ -3185,19 +3190,6 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
#endif
}
-static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
-{
- bool rc;
-
- if (!link)
- return 1;
-
- rc = dc_link_set_backlight_level_nits(link, true, brightness,
- AUX_BL_DEFAULT_TRANSITION_TIME_MS);
-
- return rc ? 0 : 1;
-}
-
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
unsigned *min, unsigned *max)
{
@@ -3260,9 +3252,10 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
brightness = convert_brightness_from_user(&caps, bd->props.brightness);
// Change brightness based on AUX property
if (caps.aux_support)
- return set_backlight_via_aux(link, brightness);
-
- rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+ else
+ rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
return rc ? 0 : 1;
}
@@ -3270,11 +3263,27 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
- int ret = dc_link_get_backlight_level(dm->backlight_link);
+ struct amdgpu_dm_backlight_caps caps;
+
+ amdgpu_dm_update_backlight_caps(dm);
+ caps = dm->backlight_caps;
+
+ if (caps.aux_support) {
+ struct dc_link *link = (struct dc_link *)dm->backlight_link;
+ u32 avg, peak;
+ bool rc;
- if (ret == DC_ERROR_UNEXPECTED)
- return bd->props.brightness;
- return convert_brightness_to_user(&dm->backlight_caps, ret);
+ rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
+ if (!rc)
+ return bd->props.brightness;
+ return convert_brightness_to_user(&caps, avg);
+ } else {
+ int ret = dc_link_get_backlight_level(dm->backlight_link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return bd->props.brightness;
+ return convert_brightness_to_user(&caps, ret);
+ }
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -4574,7 +4583,6 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
- struct drm_format_name_buf format_name;
int ret;
memset(plane_info, 0, sizeof(*plane_info));
@@ -4622,8 +4630,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
break;
default:
DRM_ERROR(
- "Unsupported screen format %s\n",
- drm_get_format_name(fb->format->format, &format_name));
+ "Unsupported screen format %p4cc\n",
+ &fb->format->format);
return -EINVAL;
}
@@ -4716,6 +4724,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+ dc_plane_state->flip_int_enabled = true;
/*
* Always set input transfer function, since plane state is refreshed
@@ -6505,8 +6514,10 @@ static int dm_plane_helper_check_state(struct drm_plane_state *state,
}
static int dm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct amdgpu_device *adev = drm_to_adev(plane->dev);
struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state;
@@ -6514,23 +6525,24 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *new_crtc_state;
int ret;
- trace_amdgpu_dm_plane_atomic_check(state);
+ trace_amdgpu_dm_plane_atomic_check(new_plane_state);
- dm_plane_state = to_dm_plane_state(state);
+ dm_plane_state = to_dm_plane_state(new_plane_state);
if (!dm_plane_state->dc_state)
return 0;
new_crtc_state =
- drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
if (!new_crtc_state)
return -EINVAL;
- ret = dm_plane_helper_check_state(state, new_crtc_state);
+ ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
- ret = fill_dc_scaling_info(state, &scaling_info);
+ ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
if (ret)
return ret;
@@ -6541,7 +6553,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
}
static int dm_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_plane_state *new_plane_state)
+ struct drm_atomic_state *state)
{
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
@@ -6551,10 +6563,12 @@ static int dm_plane_atomic_async_check(struct drm_plane *plane,
}
static void dm_plane_atomic_async_update(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_plane_state *old_state =
- drm_atomic_get_old_plane_state(new_state->state, plane);
+ drm_atomic_get_old_plane_state(state, plane);
trace_amdgpu_dm_atomic_update_cursor(new_state);
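The pattern behind the three hook conversions above: plane helpers now receive the whole struct drm_atomic_state, and each hook looks up the per-plane states it needs. A minimal sketch of a converted check hook:

    static int example_plane_atomic_check(struct drm_plane *plane,
                                          struct drm_atomic_state *state)
    {
            struct drm_plane_state *new_state =
                    drm_atomic_get_new_plane_state(state, plane);

            if (!new_state->crtc)   /* plane is being disabled */
                    return 0;

            /* ... validate new_state against its CRTC state ... */
            return 0;
    }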
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fa5059f71727..bd0101013ec8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2602,7 +2602,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
if (pipe_ctx->plane_state == NULL)
frame_ramp = 0;
} else {
- ASSERT(false);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4eee3a55fa30..18ed0d3f247e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -887,6 +887,7 @@ struct dc_plane_state {
int layer_index;
union surface_update_flags update_flags;
+ bool flip_int_enabled;
/* private to DC core */
struct dc_plane_status status;
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 9e796dfeac20..714c71a5fbde 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1257,6 +1257,16 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset)
REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
}
+void hubp1_set_flip_int(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCSURF_SURFACE_FLIP_INTERRUPT,
+ SURFACE_FLIP_INT_MASK, 1);
+
+ return;
+}
+
void hubp1_init(struct hubp *hubp)
{
//do nothing
@@ -1290,6 +1300,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.dmdata_load = NULL,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_in_blank = hubp1_in_blank,
+ .hubp_set_flip_int = hubp1_set_flip_int,
};
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a9a6ed7f4f99..e2f2f6995935 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -74,6 +74,7 @@
SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\
SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id),\
SRI(HUBPRET_CONTROL, HUBPRET, id),\
SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\
SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\
@@ -183,6 +184,7 @@
uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \
uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \
uint32_t DCSURF_SURFACE_CONTROL; \
+ uint32_t DCSURF_SURFACE_FLIP_INTERRUPT; \
uint32_t HUBPRET_CONTROL; \
uint32_t DCN_EXPANSION_MODE; \
uint32_t DCHUBP_REQ_SIZE_CONFIG; \
@@ -332,6 +334,7 @@
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
@@ -531,6 +534,7 @@
type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
type SECONDARY_SURFACE_DCC_EN;\
type SECONDARY_SURFACE_DCC_IND_64B_BLK;\
+ type SURFACE_FLIP_INT_MASK;\
type DET_BUF_PLANE1_BASE_ADDRESS;\
type CROSSBAR_SRC_CB_B;\
type CROSSBAR_SRC_CR_R;\
@@ -777,4 +781,6 @@ void hubp1_read_state_common(struct hubp *hubp);
bool hubp1_in_blank(struct hubp *hubp);
void hubp1_soft_reset(struct hubp *hubp, bool reset);
+void hubp1_set_flip_int(struct hubp *hubp);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 89912bb5014f..9ba5c624770d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2196,6 +2196,13 @@ static void dcn10_enable_plane(
if (dc->debug.sanity_checks) {
hws->funcs.verify_allow_pstate_change_high(dc);
}
+
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
+
}
void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 0df0da2e6a4d..bec7059f6d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1597,6 +1597,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.validate_dml_output = hubp2_validate_dml_output,
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 0726fb435e2a..5342c309b78c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1146,6 +1146,12 @@ void dcn20_enable_plane(
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
}
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
+
// if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
// }
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index f9045852728f..b0c9180b808f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -838,6 +838,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp21_init,
.validate_dml_output = hubp21_validate_dml_output,
+ .hubp_set_flip_int = hubp1_set_flip_int,
};
bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 072f8c880924..173488ab787a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 23.84,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
@@ -1062,8 +1062,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
{
int i;
- DC_FP_START();
-
if (dc->bb_overrides.sr_exit_time_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
@@ -1088,8 +1086,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
}
-
- DC_FP_END();
}
void dcn21_calculate_wm(
@@ -1339,7 +1335,7 @@ static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 88ffa9ff1ed1..f24612523248 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -511,6 +511,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_init = hubp3_init,
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 8d0f663489ac..fb7f1dea3c46 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -2508,6 +2508,19 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
};
+#define CTX ctx
+
+#define REG(reg_name) \
+ (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+ /* Support for max 6 pipes */
+ value = value & 0x3f;
+ return value;
+}
+
static bool dcn30_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@@ -2517,6 +2530,15 @@ static bool dcn30_resource_construct(
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
struct ddc_service_init_data ddc_init_data;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ uint32_t num_pipes = 0;
+
+ if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: Unexpected fuse recipe for navi2x !\n");
+ /* fault to single pipe */
+ pipe_fuses = 0x3e;
+ }
DC_FP_START();
@@ -2650,6 +2672,15 @@ static bool dcn30_resource_construct(
/* PP Lib and SMU interfaces */
init_soc_bounding_box(dc, pool);
+ num_pipes = dcn3_0_ip.max_num_dpp;
+
+ for (i = 0; i < dcn3_0_ip.max_num_dpp; i++)
+ if (pipe_fuses & 1 << i)
+ num_pipes--;
+
+ dcn3_0_ip.max_num_dpp = num_pipes;
+ dcn3_0_ip.max_num_otg = num_pipes;
+
dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
/* IRQ */
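In the CC_DC_PIPE_DIS fuse value each set bit marks a harvested (disabled) pipe, so the loop above subtracts one pipe per set bit. Worked example: pipe_fuses == 0x3e (binary 111110) disables pipes 1 through 5, leaving num_pipes = 6 - 5 = 1, which is also why 0x3e doubles as the single-pipe fallback when an unexpected fuse recipe is found; pipe_fuses == 0 keeps all six pipes.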
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 5d4b2c60192e..c494235016e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1619,12 +1619,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
+static void calculate_wm_set_for_vlevel(
+ int vlevel,
+ struct wm_range_table_entry *table_entry,
+ struct dcn_watermarks *wm_set,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
+
+ ASSERT(vlevel < dml->soc.num_states);
+ /* only pipe 0 is read for voltage and dcf/soc clocks */
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
+ pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
+
+ dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+ dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+ dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
+
+ wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
+ dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
+
+}
+
+static void dcn301_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel_req)
+{
+ int i, pipe_idx;
+ int vlevel, vlevel_max;
+ struct wm_range_table_entry *table_entry;
+ struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
+
+ ASSERT(bw_params);
+
+ vlevel_max = bw_params->clk_table.num_entries - 1;
+
+ /* WM Set D */
+ table_entry = &bw_params->wm_table.entries[WM_D];
+ if (table_entry->wm_type == WM_TYPE_RETRAINING)
+ vlevel = 0;
+ else
+ vlevel = vlevel_max;
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set C */
+ table_entry = &bw_params->wm_table.entries[WM_C];
+ vlevel = min(max(vlevel_req, 2), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set B */
+ table_entry = &bw_params->wm_table.entries[WM_B];
+ vlevel = min(max(vlevel_req, 1), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ /* WM Set A */
+ table_entry = &bw_params->wm_table.entries[WM_A];
+ vlevel = min(vlevel_req, vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
+
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
- .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+ .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
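The vlevel clamping above gives each watermark set its own floor. Worked example with vlevel_req = 0 and a four-entry clock table (vlevel_max = 3): set A is computed at vlevel 0, set B at min(max(0, 1), 3) = 1, set C at min(max(0, 2), 3) = 2, and set D at vlevel_max = 3, unless its table entry is of the retraining type, in which case vlevel 0 is used.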
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 22f3f643ed1b..346dcd87dc10 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -191,6 +191,8 @@ struct hubp_funcs {
bool (*hubp_in_blank)(struct hubp *hubp);
void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
+ void (*hubp_set_flip_int)(struct hubp *hubp);
+
};
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index c57dc9ae81f2..a2681fe875ed 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5216,10 +5216,10 @@ static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
for (j = 0; j < dep_sclk_table->count; j++) {
valid_entry = false;
for (k = 0; k < watermarks->num_wm_sets; k++) {
- if (dep_sclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz &&
- dep_sclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz &&
- dep_mclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz &&
- dep_mclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz) {
+ if (dep_sclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
+ dep_sclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
+ dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
+ dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
valid_entry = true;
table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
break;
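The rewritten comparison is about units: the dependency-table clocks are stored in 10 kHz units while the watermark ranges arrive in kHz, so the divide-by-10 belongs on the kHz side. Worked example for a 1.5 GHz entry (clk = 150000 in 10 kHz units) against an 800 MHz range minimum (800000 kHz): the old test, 150000 / 10 = 15000 >= 800000, wrongly fails; the new test, 150000 >= 800000 / 10 = 80000, correctly passes.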
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 29c99642d22d..22b636e2b89b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -1505,6 +1505,48 @@ static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct vega10_hwmgr *data =
+ (struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t pcie_gen = 0, pcie_width = 0;
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ int i;
+
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ if (pp_table->PcieGenSpeed[i] > pcie_gen)
+ pp_table->PcieGenSpeed[i] = pcie_gen;
+
+ if (pp_table->PcieLaneCount[i] > pcie_width)
+ pp_table->PcieLaneCount[i] = pcie_width;
+ }
+
+ return 0;
+}
+
static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
{
int result = -1;
@@ -2556,6 +2598,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to initialize Link Level!",
return result);
+ result = vega10_override_pcie_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to override pcie parameters!",
+ return result);
+
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize Graphics Level!",
@@ -2922,6 +2969,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
return 0;
}
+
static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega10_hwmgr *data = hwmgr->backend;
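The new helper maps the strongest capability bit to a level index (GEN1..GEN4 -> pcie_gen 0..3, x1..x16 -> pcie_width 1..6) and then clamps every pptable link level down to what the platform supports. For example, on a Gen3 x8 host pcie_gen = 2 and pcie_width = 4, so a pptable entry advertising PcieGenSpeed = 3 (Gen4) is lowered to 2, while a PcieLaneCount of 4 or less is left untouched.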
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index c0753029a8e2..43e01d880f7c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -481,6 +481,67 @@ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
dpm_state->hard_max_level = 0xffff;
}
+static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ int i;
+ int ret;
+
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+ */
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
+ pp_table->PcieGenSpeed[i];
+ pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
+ pp_table->PcieLaneCount[i];
+
+ if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
+ pp_table->PcieLaneCount[i]) {
+ smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+ NULL);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[OverridePcieParameters] Attempt to override pcie params failed!",
+ return ret);
+ }
+
+ /* update the pptable */
+ pp_table->PcieGenSpeed[i] = pcie_gen_arg;
+ pp_table->PcieLaneCount[i] = pcie_width_arg;
+ }
+
+ return 0;
+}
+
static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
PPCLK_e clk_id, uint32_t *num_of_levels)
{
@@ -968,6 +1029,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to enable all smu features!",
return result);
+ result = vega12_override_pcie_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to override pcie parameters!",
+ return result);
+
tmp_result = vega12_power_control_set_level(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to power control set level!",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 87811b005b85..f19964c69a00 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -831,7 +831,9 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
- uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
+ uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ int i;
int ret;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -860,17 +862,27 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
* Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
* Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
*/
- smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
- ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
- NULL);
- PP_ASSERT_WITH_CODE(!ret,
- "[OverridePcieParameters] Attempt to override pcie params failed!",
- return ret);
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
+ pp_table->PcieGenSpeed[i];
+ pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
+ pp_table->PcieLaneCount[i];
+
+ if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
+ pp_table->PcieLaneCount[i]) {
+ smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+ NULL);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[OverridePcieParameters] Attempt to override pcie params failed!",
+ return ret);
+ }
- data->pcie_parameters_override = true;
- data->pcie_gen_level1 = pcie_gen;
- data->pcie_width_level1 = pcie_width;
+ /* update the pptable */
+ pp_table->PcieGenSpeed[i] = pcie_gen_arg;
+ pp_table->PcieLaneCount[i] = pcie_width_arg;
+ }
return 0;
}
@@ -3319,9 +3331,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
- struct phm_ppt_v3_information *pptable_information =
- (struct phm_ppt_v3_information *)hwmgr->pptable;
- PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
+ PPTable_t *pptable = &(data->smc_state_table.pp_table);
struct pp_clock_levels_with_latency clocks;
struct vega20_single_dpm_table *fclk_dpm_table =
&(data->dpm_table.fclk_table);
@@ -3420,13 +3430,9 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
current_lane_width =
vega20_get_current_pcie_link_width_level(hwmgr);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- if (i == 1 && data->pcie_parameters_override) {
- gen_speed = data->pcie_gen_level1;
- lane_width = data->pcie_width_level1;
- } else {
- gen_speed = pptable->PcieGenSpeed[i];
- lane_width = pptable->PcieLaneCount[i];
- }
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :