diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-20 15:36:38 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-20 15:36:38 -0800 |
| commit | d4a292c5f8e65d2784b703c67179f4f7d0c7846c (patch) | |
| tree | 0e3da362e92fea4acb77ceb7af1abe5713d90eac | |
| parent | eee3666c92406fd8e5e3084b0b3129528dfe9557 (diff) | |
| parent | ae9e8654579709c2f10b8c86a8467e1710d4599f (diff) | |
Merge tag 'drm-next-2026-02-21' of https://gitlab.freedesktop.org/drm/kernel
Pull drm fixes from Dave Airlie:
"This is the fixes and cleanups for the end of the merge window, it's
nearly all amdgpu, with some amdkfd, then a pagemap core fix, i915/xe
display fixes, and some xe driver fixes.
Nothing seems out of the ordinary, except amdgpu is a little more
volume than usual.
pagemap:
- drm/pagemap: pass pagemap_addr by reference
amdgpu:
- DML 2.1 fixes
- Panel replay fixes
- Display writeback fixes
- MES 11 old firmware compat fix
- DC CRC improvements
- DPIA fixes
- XGMI fixes
- ASPM fix
- SMU feature bit handling fixes
- DC LUT fixes
- RAS fixes
- Misc memory leak in error path fixes
- SDMA queue reset fixes
- PG handling fixes
- 5 level GPUVM page table fix
- SR-IOV fix
- Queue reset fix
- SMU 13.x fixes
- DC resume lag fix
- MPO fixes
- DCN 3.6 fix
- VSDB fixes
- HWSS clean up
- Replay fixes
- DCE cursor fixes
- DCN 3.5 SR DDR5 latency fixes
- HPD fixes
- Error path unwind fixes
- SMU13/14 mode1 reset fixes
- PSP 15 updates
- SMU 15 updates
- Sync fix in amdgpu_dma_buf_move_notify()
- HAINAN fix
- PSP 13.x fix
- GPUVM locking fix
- Fixes for DC analog support
- DC FAMS fixes
- DML 2.1 fixes
- eDP fixes
- Misc DC fixes
- Fastboot fix
- 3DLUT fixes
- GPUVM fixes
- 64bpp format fix
- Fix for MacBooks with switchable gfx
amdkfd:
- Fix possible double deletion of validate list
- Event setup fix
- Device disconnect regression fix
- APU GTT as VRAM fix
- Fix priority inversion with MQDs
- NULL check fix
radeon:
- HAINAN fix
i915/xe display:
- Regression fix for HDR 4k displays (#15503)
- Fixup for Dell XPS 13 7390 eDP rate limit
- Memory leak fix on ACPI _DSM handling
- Add missing slice count check during DP mode validation
xe:
- drm/xe: Prevent VFs from exposing the CCS mode sysfs file
- SRIOV related fixes
- PAT cache fix
- MMIO read fix
- W/a fixes
- Adjust type of xe_modparam.force_vram_bar_size
- Wedge mode fix
- HWMon fix
* tag 'drm-next-2026-02-21' of https://gitlab.freedesktop.org/drm/kernel: (143 commits)
drm/amd/display: Remove unneeded DAC link encoder register
drm/amd/display: Enable DAC in DCE link encoder
drm/amd/display: Set CRTC source for DAC using registers
drm/amd/display: Initialize DAC in DCE link encoder using VBIOS
drm/amd/display: Turn off DAC in DCE link encoder using VBIOS
drm/amd/display: Don't call find_analog_engine() twice
drm/amdgpu: fix 4-level paging if GMC supports 57-bit VA v2
drm/amdgpu: keep vga memory on MacBooks with switchable graphics
drm/amdgpu: Set atomics to true for xgmi
drm/amdkfd: Check for NULL return values
drm/amd/display: Use same max plane scaling limits for all 64 bpp formats
drm/amdgpu: Set vmid0 PAGE_TABLE_DEPTH for GFX12.1
drm/amdkfd: Disable MQD queue priority
drm/amd/display: Remove conditional for shaper 3DLUT power-on
drm/amd/display: Check return of shaper curve to HW format
drm/amd/display: Correct logic check error for fastboot
drm/amd/display: Skip eDP detection when no sink
Revert "drm/amd/display: Add Gfx Base Case For Linear Tiling Handling"
Revert "drm/amd/display: Correct hubp GfxVersion verification"
Revert "drm/amd/display: Add Handling for gfxversion DcGfxBase"
...
201 files changed, 2120 insertions, 950 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index c126d1bf2bc8..02d5abf9df2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1186,8 +1186,10 @@ int amdgpu_acpi_enumerate_xcc(void) if (!dev_info) ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, sbdf); - if (ret == -ENOMEM) + if (ret == -ENOMEM) { + kfree(xcc_info); return ret; + } if (!dev_info) { kfree(xcc_info); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 877d0df50376..3bfd79c89df3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -317,8 +317,7 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev) void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev) { if (amdgpu_device_should_recover_gpu(adev)) - amdgpu_reset_domain_schedule(adev->reset_domain, - &adev->kfd.reset_work); + (void)amdgpu_reset_domain_schedule(adev->reset_domain, &adev->kfd.reset_work); } int amdgpu_amdkfd_alloc_kernel_mem(struct amdgpu_device *adev, size_t size, @@ -720,9 +719,8 @@ void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle) if (gfx_block != NULL) gfx_block->version->funcs->set_powergating_state((void *)gfx_block, state); } - amdgpu_dpm_switch_power_profile(adev, - PP_SMC_POWER_PROFILE_COMPUTE, - !idle); + (void)amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, !idle); + } bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c index 193ed8becab8..31b8fa52b42f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c @@ -107,7 +107,7 @@ static const char *amdkfd_fence_get_timeline_name(struct dma_fence *f) { struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f); - return 
fence->timeline_name; + return fence ? fence->timeline_name : NULL; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 4ce93536eeda..d8d834cf96e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -878,6 +878,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_bo *bo[2] = {NULL, NULL}; struct amdgpu_bo_va *bo_va; bool same_hive = false; + struct drm_exec exec; int i, ret; if (!va) { @@ -958,19 +959,25 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, goto unwind; } - /* Add BO to VM internal data structures */ - ret = amdgpu_bo_reserve(bo[i], false); - if (ret) { - pr_debug("Unable to reserve BO during memory attach"); - goto unwind; + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_until_all_locked(&exec) { + ret = amdgpu_vm_lock_pd(vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unwind; + ret = drm_exec_lock_obj(&exec, &bo[i]->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unwind; } + bo_va = amdgpu_vm_bo_find(vm, bo[i]); if (!bo_va) bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); else ++bo_va->ref_count; attachment[i]->bo_va = bo_va; - amdgpu_bo_unreserve(bo[i]); + drm_exec_fini(&exec); if (unlikely(!attachment[i]->bo_va)) { ret = -ENOMEM; pr_err("Failed to add BO object to VM. 
ret == %d\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d591dce0f3b3..f3b5bcdbf2ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -892,8 +892,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo = e->bo; e->range = amdgpu_hmm_range_alloc(NULL); - if (unlikely(!e->range)) - return -ENOMEM; + if (unlikely(!e->range)) { + r = -ENOMEM; + goto out_free_user_pages; + } r = amdgpu_ttm_tt_get_user_pages(bo, e->range); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c7f44422939f..c3cb9570f0ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3504,9 +3504,6 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) } } - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); - amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); - amdgpu_amdkfd_suspend(adev, true); amdgpu_amdkfd_teardown_processes(adev); amdgpu_userq_suspend(adev); @@ -4618,9 +4615,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a * internal path natively support atomics, set have_atomics_support to true. 
*/ - } else if ((adev->flags & AMD_IS_APU) && - (amdgpu_ip_version(adev, GC_HWIP, 0) > - IP_VERSION(9, 0, 0))) { + } else if ((adev->flags & AMD_IS_APU && + amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) || + (adev->gmc.xgmi.connected_to_cpu && + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))) { adev->have_atomics_support = true; } else { adev->have_atomics_support = @@ -4655,9 +4653,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, dev_info(adev->dev, "Pending hive reset.\n"); amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_MINIMAL_XGMI); - } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && - !amdgpu_device_has_display_hardware(adev)) { - r = psp_gpu_reset(adev); } else { tmp = amdgpu_reset_method; /* It should do a default reset when loading or reloading the driver, @@ -4902,6 +4897,9 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_virt_fini_data_exchange(adev); } + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized) { @@ -4924,7 +4922,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) * before ip_fini_early to prevent kfd locking refcount issues by calling * amdgpu_amdkfd_suspend() */ - if (drm_dev_is_unplugged(adev_to_drm(adev))) + if (pci_dev_is_disconnected(adev->pdev)) amdgpu_amdkfd_device_fini_sw(adev); amdgpu_device_ip_fini_early(adev); @@ -4936,7 +4934,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_gart_dummy_page_fini(adev); - if (drm_dev_is_unplugged(adev_to_drm(adev))) + if (pci_dev_is_disconnected(adev->pdev)) amdgpu_device_unmap_mmio(adev); } @@ -5733,6 +5731,9 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev) /* enable mmio access after mode 1 reset completed */ adev->no_hw_access = false; + /* ensure no_hw_access is updated before we access hw */ + smp_mb(); + 
amdgpu_device_load_pci_state(adev->pdev); ret = amdgpu_psp_wait_for_bootloader(adev); if (ret) @@ -7357,6 +7358,9 @@ void amdgpu_device_halt(struct amdgpu_device *adev) amdgpu_xcp_dev_unplug(adev); drm_dev_unplug(ddev); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + amdgpu_irq_disable_all(adev); amdgpu_fence_driver_hw_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 41e63c286912..fc8c1f36be58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -2164,6 +2164,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 11): case IP_VERSION(13, 0, 12): case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 15): case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): case IP_VERSION(14, 0, 4): @@ -2988,9 +2989,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 5, 1): case IP_VERSION(11, 5, 2): case IP_VERSION(11, 5, 3): - case IP_VERSION(11, 5, 4): adev->family = AMDGPU_FAMILY_GC_11_5_0; break; + case IP_VERSION(11, 5, 4): + adev->family = AMDGPU_FAMILY_GC_11_5_4; + break; case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): case IP_VERSION(12, 1, 0): diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index b9c38a4fe546..656c267dbe58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -514,8 +514,15 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) r = dma_resv_reserve_fences(resv, 2); if (!r) r = amdgpu_vm_clear_freed(adev, vm, NULL); + + /* Don't pass 'ticket' to amdgpu_vm_handle_moved: we want the clear=true + * path to be used otherwise we might update the PT of another process + * while it's using the BO. 
+ * With clear=true, amdgpu_vm_bo_update will sync to command submission + * from the same VM. + */ if (!r) - r = amdgpu_vm_handle_moved(adev, vm, ticket); + r = amdgpu_vm_handle_moved(adev, vm, NULL); if (r && r != -EBUSY) DRM_ERROR("Failed to invalidate VM page tables (%d))\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 5f9fa2140f09..5c90de58cc28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -232,6 +232,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; struct mm_struct *mm; + struct drm_exec exec; int r; mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm); @@ -242,9 +243,18 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, !amdgpu_vm_is_bo_always_valid(vm, abo)) return -EPERM; - r = amdgpu_bo_reserve(abo, false); - if (r) - return r; + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); + drm_exec_until_all_locked(&exec) { + r = drm_exec_prepare_obj(&exec, &abo->tbo.base, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto out_unlock; + + r = amdgpu_vm_lock_pd(vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto out_unlock; + } amdgpu_vm_bo_update_shared(abo); bo_va = amdgpu_vm_bo_find(vm, abo); @@ -260,8 +270,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, amdgpu_bo_unreserve(abo); return r; } - - amdgpu_bo_unreserve(abo); + drm_exec_fini(&exec); /* Validate and add eviction fence to DMABuf imports with dynamic * attachment in compute VMs. 
Re-validation will be done by @@ -294,7 +303,10 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, } } mutex_unlock(&vm->process_info->lock); + return r; +out_unlock: + drm_exec_fini(&exec); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index b793ce17140c..6a6b334428f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -1068,6 +1068,16 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) case CHIP_RENOIR: adev->mman.keep_stolen_vga_memory = true; break; + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + /* MacBookPros with switchable graphics put VRAM at 0 when + * the iGPU is enabled which results in cursor issues if + * the cursor ends up at 0. Reserve vram at 0 in that case. + */ + if (adev->gmc.vram_start == 0) + adev->mman.keep_stolen_vga_memory = true; + break; default: adev->mman.keep_stolen_vga_memory = false; break; @@ -1436,7 +1446,7 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, if (!*exp_ranges) *exp_ranges = range_cnt; err: - kfree(ranges); + kvfree(ranges); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index e8e8bfa098c3..0e8a52d96573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -33,9 +33,9 @@ #include "amdgpu_ras.h" /* VA hole for 48bit and 57bit addresses */ -#define AMDGPU_GMC_HOLE_START (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ +#define AMDGPU_GMC_HOLE_START (adev->vm_manager.max_level == 4 ?\ 0x0100000000000000ULL : 0x0000800000000000ULL) -#define AMDGPU_GMC_HOLE_END (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ +#define AMDGPU_GMC_HOLE_END (adev->vm_manager.max_level == 4 ?\ 0xff00000000000000ULL : 0xffff800000000000ULL) /* @@ -45,8 +45,8 @@ * This mask is used to remove the upper 16bits of the VA and so come up with * the linear addr 
value. */ -#define AMDGPU_GMC_HOLE_MASK (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ - 0x00ffffffffffffffULL : 0x0000ffffffffffffULL) +#define AMDGPU_GMC_HOLE_MASK (adev->vm_manager.max_level == 4 ?\ + 0x01ffffffffffffffULL : 0x0000ffffffffffffULL) /* * Ring size as power of two for the log of recent faults. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 44f230d67da2..bfa64cd7a62d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -229,7 +229,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, r = amdgpu_vm_flush(ring, job, need_pipe_sync); if (r) { amdgpu_ring_undo(ring); - return r; + goto free_fence; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index aaf5477fcd7a..2c82d9e8c0be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -92,6 +92,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) struct drm_wedge_task_info *info = NULL; struct amdgpu_task_info *ti = NULL; struct amdgpu_device *adev = ring->adev; + enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET; int idx, r; if (!drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -135,13 +136,19 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) ring->funcs->reset) { dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); + /* Stop the scheduler to prevent anybody else from touching the ring buffer. 
*/ + drm_sched_wqueue_stop(&ring->sched); r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence); if (!r) { + /* Start the scheduler again */ + drm_sched_wqueue_start(&ring->sched); atomic_inc(&ring->adev->gpu_reset_counter); dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name); drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info); + /* This is needed to add the job back to the pending list */ + status = DRM_GPU_SCHED_STAT_NO_HANG; goto exit; } dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name); @@ -177,7 +184,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) exit: amdgpu_vm_put_task_info(ti); drm_dev_exit(idx); - return DRM_GPU_SCHED_STAT_RESET; + return status; } int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1878e0faa722..f69332eed051 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1445,6 +1445,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv; + struct drm_exec exec; int r, pasid; /* Ensure IB tests are run on ring */ @@ -1484,7 +1485,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (r) goto error_pasid; + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); + drm_exec_until_all_locked(&exec) { + r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_vm; + } + fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL); + drm_exec_fini(&exec); if (!fpriv->prt_va) { r = -ENOMEM; goto error_vm; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index b0540b009e84..a7c7b378c696 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -148,6 +148,7 @@ static int psp_init_sriov_microcode(struct psp_context *psp) break; case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 15): ret = psp_init_cap_microcode(psp, ucode_prefix); ret &= psp_init_ta_microcode(psp, ucode_prefix); break; @@ -219,6 +220,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block) psp->autoload_supported = false; break; case IP_VERSION(13, 0, 12): + case IP_VERSION(13, 0, 15): psp_v13_0_set_psp_funcs(psp); psp->autoload_supported = false; adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); @@ -383,7 +385,8 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) return false; db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; @@ -735,7 +738,7 @@ psp_cmd_submit_buf(struct psp_context *psp, ras_intr = amdgpu_ras_intr_triggered(); if (ras_intr) break; - usleep_range(10, 100); + usleep_range(60, 100); amdgpu_device_invalidate_hdp(psp->adev, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index f582113d78b7..856b1bf83533 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -4352,7 +4352,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev) * to handle fatal error */ r = amdgpu_nbio_ras_sw_init(adev); if (r) - return r; + goto release_con; if (adev->nbio.ras && adev->nbio.ras->init_ras_controller_interrupt) { @@ -4650,6 +4650,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) amdgpu_ras_block_late_init_default(adev, &obj->ras_comm); } + amdgpu_ras_check_bad_page_status(adev); + return 0; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 469d04a39d7d..6fba9d5b29ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -1701,10 +1701,12 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) } res = __verify_ras_table_checksum(control); - if (res) + if (res) { dev_err(adev->dev, "RAS table incorrect checksum or error:%d\n", res); + return -EINVAL; + } /* Warn if we are at 90% of the threshold or above */ @@ -1712,10 +1714,6 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d", control->ras_num_bad_pages, ras->bad_page_cnt_threshold); - if (amdgpu_bad_page_threshold != 0 && - control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) - amdgpu_dpm_send_rma_reason(adev); - } else if (hdr->header == RAS_TABLE_HDR_BAD && amdgpu_bad_page_threshold != 0) { if (hdr->version >= RAS_TABLE_VER_V2_1) { @@ -1932,3 +1930,26 @@ int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev, result); return -EOPNOTSUPP; } + +void amdgpu_ras_check_bad_page_status(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control = ras ? 
&ras->eeprom_control : NULL; + + if (!control || amdgpu_bad_page_threshold == 0) + return; + + if (control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) { + if (amdgpu_dpm_send_rma_reason(adev)) + dev_warn(adev->dev, "Unable to send out-of-band RMA CPER"); + else + dev_dbg(adev->dev, "Sent out-of-band RMA CPER"); + + if (adev->cper.enabled && !amdgpu_uniras_enabled(adev)) { + if (amdgpu_cper_generate_bp_threshold_record(adev)) + dev_warn(adev->dev, "Unable to send in-band RMA CPER"); + else + dev_dbg(adev->dev, "Sent in-band RMA CPER"); + } + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 2e5d63957e71..a62114800a92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -193,6 +193,8 @@ int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control, int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control); +void amdgpu_ras_check_bad_page_status(struct amdgpu_device *adev); + extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops; extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index b82357c65723..129ad5138653 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -868,8 +868,6 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring) void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, struct amdgpu_fence *guilty_fence) { - /* Stop the scheduler to prevent anybody else from touching the ring buffer. 
*/ - drm_sched_wqueue_stop(&ring->sched); /* back up the non-guilty commands */ amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence); } @@ -895,8 +893,6 @@ int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ring->ring_backup[i]); amdgpu_ring_commit(ring); } - /* Start the scheduler again */ - drm_sched_wqueue_start(&ring->sched); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 8b8a04138711..321310ba2c08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -558,6 +558,9 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, struct amdgpu_ring *gfx_ring = &sdma_instance->ring; struct amdgpu_ring *page_ring = &sdma_instance->page; + if (amdgpu_sriov_vf(adev)) + return -EOPNOTSUPP; + mutex_lock(&sdma_instance->engine_reset_mutex); if (!caller_handles_kernel_queues) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e549accf96ba..9a1db36e73b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1735,6 +1735,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, { struct amdgpu_bo_va *bo_va; + amdgpu_vm_assert_locked(vm); + bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); if (bo_va == NULL) { return NULL; @@ -2360,26 +2362,9 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, unsigned max_bits) { unsigned int max_size = 1 << (max_bits - 30); - bool sys_5level_pgtable = false; unsigned int vm_size; uint64_t tmp; -#ifdef CONFIG_X86_64 - /* - * Refer to function configure_5level_paging() for details. 
- */ - sys_5level_pgtable = (native_read_cr4() & X86_CR4_LA57); -#endif - - /* - * If GPU supports 5-level page table, but system uses 4-level page table, - * then use 4-level page table on GPU - */ - if (max_level == 4 && !sys_5level_pgtable) { - min_vm_size = 256 * 1024; - max_level = 3; - } - /* adjust vm size first */ if (amdgpu_vm_size != -1) { vm_size = amdgpu_vm_size; @@ -2415,6 +2400,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, } adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; + adev->vm_manager.max_level = max_level; tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); if (amdgpu_vm_block_size != -1) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 139642eacdd0..806d62ed61ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -456,6 +456,7 @@ struct amdgpu_vm_manager { bool concurrent_flush; uint64_t max_pfn; + uint32_t max_level; uint32_t num_level; uint32_t block_size; uint32_t fragment_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index aad530c46a9f..0ca6fa40a87c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -42,8 +42,6 @@ #define XGMI_STATE_DISABLE 0xD1 #define XGMI_STATE_LS0 0x81 -#define XGMI_LINK_ACTIVE 1 -#define XGMI_LINK_INACTIVE 0 static DEFINE_MUTEX(xgmi_mutex); @@ -365,9 +363,9 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num) return -ENOLINK; if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_LS0) - return XGMI_LINK_ACTIVE; + return AMDGPU_XGMI_LINK_ACTIVE; - return XGMI_LINK_INACTIVE; + return AMDGPU_XGMI_LINK_INACTIVE; } /** @@ -1176,7 +1174,7 @@ static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_ban switch (type) { case ACA_SMU_TYPE_UE: - if (ext_error_code != 0 && ext_error_code != 9) + if (ext_error_code != 0 && ext_error_code != 1 && 
ext_error_code != 9) count = 0ULL; bank->aca_err_type = ACA_ERROR_TYPE_UE; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_2_0.c index 7e917eb47a8c..a72770e3d0e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_2_0.c @@ -395,7 +395,10 @@ static void mmhub_v4_2_0_mid_enable_system_domain(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, - PAGE_TABLE_DEPTH, 0); + PAGE_TABLE_DEPTH, adev->gmc.vmid0_page_table_depth); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, + PAGE_TABLE_BLOCK_SIZE, + adev->gmc.vmid0_page_table_block_size); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index af4a7d7c4abd..d1e1a4369521 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -57,6 +57,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_12_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_12_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_14_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_14_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_15_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_15_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_1_toc.bin"); @@ -121,6 +123,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) case IP_VERSION(13, 0, 10): case IP_VERSION(13, 0, 12): case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 15): err = psp_init_sos_microcode(psp, ucode_prefix); if (err) return err; @@ -156,7 +159,8 @@ static void psp_v13_0_bootloader_print_status(struct psp_context *psp, if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || - 
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) { at = 0; for_each_inst(i, adev->aid_mask) { bl_status_reg = @@ -202,7 +206,8 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp) retry_cnt = ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))) ? + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15))) ? PSP_VMBX_POLLING_LIMIT : 10; /* Wait for bootloader to signify that it is ready having bit 31 of @@ -232,7 +237,8 @@ static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp) if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) { ret = psp_v13_0_wait_for_vmbx_ready(psp); if (ret) amdgpu_ras_query_boot_status(adev, 4); @@ -872,7 +878,8 @@ static bool psp_v13_0_get_ras_capability(struct psp_context *psp) if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) && + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) && (!(adev->flags & AMD_IS_APU))) { reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127); adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0)); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v15_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v15_0.c index 
3aca293e2f0c..723ddae17644 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v15_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v15_0.c @@ -45,6 +45,10 @@ static int psp_v15_0_0_init_microcode(struct psp_context *psp) if (err) return err; + err = psp_init_ta_microcode(psp, ucode_prefix); + if (err) + return err; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v15_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v15_0_8.c index 5249f5bd2a10..b2d7cbd894c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v15_0_8.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v15_0_8.c @@ -187,6 +187,26 @@ static void psp_v15_0_8_ring_set_wptr(struct psp_context *psp, uint32_t value) WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67, value); } +static bool psp_v15_0_8_get_ras_capability(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + u32 reg_data; + + /* query ras cap should be done from host side */ + if (amdgpu_sriov_vf(adev)) + return false; + + if (!con) + return false; + + reg_data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_127); + adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0)); + con->poison_supported = ((reg_data & GENMASK_ULL(24, 24)) >> 24) ? 
true : false; + + return true; +} + static int psp_v15_0_8_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) { @@ -334,6 +354,7 @@ static const struct psp_funcs psp_v15_0_8_funcs = { .ring_get_wptr = psp_v15_0_8_ring_get_wptr, .ring_set_wptr = psp_v15_0_8_ring_set_wptr, .get_fw_type = psp_v15_0_8_get_fw_type, + .get_ras_capability = psp_v15_0_8_get_ras_capability, }; void psp_v15_0_8_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index e77e079fe833..e3a035c9fece 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1424,18 +1424,9 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) adev->sdma.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); - switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { - case IP_VERSION(5, 0, 0): - case IP_VERSION(5, 0, 2): - case IP_VERSION(5, 0, 5): - if ((adev->sdma.instance[0].fw_version >= 35) && - !amdgpu_sriov_vf(adev) && - !adev->debug_disable_gpu_ring_reset) - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - break; - default: - break; - } + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; /* Allocate memory for SDMA IP Dump buffer */ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 50b51965c211..feebaa8cd9b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1342,25 +1342,9 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) adev->sdma.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); - switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { - case IP_VERSION(5, 2, 0): - case IP_VERSION(5, 2, 2): - case IP_VERSION(5, 2, 3): 
- case IP_VERSION(5, 2, 4): - if ((adev->sdma.instance[0].fw_version >= 76) && - !amdgpu_sriov_vf(adev) && - !adev->debug_disable_gpu_ring_reset) - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - break; - case IP_VERSION(5, 2, 5): - if ((adev->sdma.instance[0].fw_version >= 34) && - !amdgpu_sriov_vf(adev) && - !adev->debug_disable_gpu_ring_reset) - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - break; - default: - break; - } + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; /* Allocate memory for SDMA IP Dump buffer */ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index eec659194718..b40126f5d3ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1364,18 +1364,9 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) adev->sdma.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); - switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { - case IP_VERSION(6, 0, 0): - case IP_VERSION(6, 0, 2): - case IP_VERSION(6, 0, 3): - if ((adev->sdma.instance[0].fw_version >= 21) && - !amdgpu_sriov_vf(adev) && - !adev->debug_disable_gpu_ring_reset) - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - break; - default: - break; - } + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; if (amdgpu_sdma_ras_sw_init(adev)) { dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 54b14751fd7a..4e037a6978f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1478,7 +1478,8 @@ static void soc15_common_get_clockgating_state(struct amdgpu_ip_block 
*ip_block, if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) && (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) && (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 12)) && - (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14))) { + (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14)) && + (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 15))) { /* AMD_CG_SUPPORT_DRM_MGCG */ data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); if (!(data & 0x01000000)) diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index d9cc649d81ad..8122a5cacf07 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -422,6 +422,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev) case IP_VERSION(14, 0, 1): case IP_VERSION(14, 0, 4): case IP_VERSION(14, 0, 5): + case IP_VERSION(15, 0, 0): return AMD_RESET_METHOD_MODE2; default: if (amdgpu_dpm_is_baco_supported(adev)) @@ -838,9 +839,28 @@ static int soc21_common_early_init(struct amdgpu_ip_block *ip_block) break; case IP_VERSION(11, 5, 4): adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG | - AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_FGCG | + AMD_CG_SUPPORT_REPEATER_FGCG | + AMD_CG_SUPPORT_GFX_PERF_CLK | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD | + AMD_CG_SUPPORT_ATHUB_MGCG | + AMD_CG_SUPPORT_ATHUB_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | - AMD_PG_SUPPORT_JPEG; + AMD_PG_SUPPORT_JPEG | + AMD_PG_SUPPORT_GFX_PG; adev->external_rev_id = adev->rev_id + 0x1; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
index cebee453871c..006a15451197 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -521,7 +521,9 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block) RREG32_SOC15(VCN, i, mmUVD_STATUS))) vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + /* VF doesn't enable interrupt operations for RAS */ + if (!amdgpu_sriov_vf(adev) && + amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 88621cb7d409..732ad1224a61 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2804,8 +2804,12 @@ static int runtime_enable(struct kfd_process *p, uint64_t r_debug, * SET_SHADER_DEBUGGER clears any stale process context data * saved in MES. */ - if (pdd->dev->kfd->shared_resources.enable_mes) - kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); + if (pdd->dev->kfd->shared_resources.enable_mes) { + ret = kfd_dbg_set_mes_debug_mode( + pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); + if (ret) + return ret; + } } p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 36ffc3c78536..a1087c13f241 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -2359,7 +2359,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, if (kdev->kfd->hive_id) { for (nid = 0; nid < proximity_domain; ++nid) { peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid); - if (!peer_dev->gpu) + if (!peer_dev || !peer_dev->gpu) continue; if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) continue; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 8f8a0975f1a7..0f7aa51b629e 
100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -404,27 +404,25 @@ static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_i return -ENOMEM; } -static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id) +static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, u32 watch_id) { spin_lock(&pdd->dev->watch_points_lock); /* process owns device watch point so safe to clear */ - if ((pdd->alloc_watch_ids >> watch_id) & 0x1) { - pdd->alloc_watch_ids &= ~(0x1 << watch_id); - pdd->dev->alloc_watch_ids &= ~(0x1 << watch_id); + if (pdd->alloc_watch_ids & BIT(watch_id)) { + pdd->alloc_watch_ids &= ~BIT(watch_id); + pdd->dev->alloc_watch_ids &= ~BIT(watch_id); } spin_unlock(&pdd->dev->watch_points_lock); } -static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id) +static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, u32 watch_id) { bool owns_watch_id = false; spin_lock(&pdd->dev->watch_points_lock); - owns_watch_id = watch_id < MAX_WATCH_ADDRESSES && - ((pdd->alloc_watch_ids >> watch_id) & 0x1); - + owns_watch_id = pdd->alloc_watch_ids & BIT(watch_id); spin_unlock(&pdd->dev->watch_points_lock); return owns_watch_id; @@ -435,6 +433,9 @@ int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd, { int r; + if (watch_id >= MAX_WATCH_ADDRESSES) + return -EINVAL; + if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id)) return -EINVAL; @@ -472,6 +473,9 @@ int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, if (r) return r; + if (*watch_id >= MAX_WATCH_ADDRESSES) + return -EINVAL; + if (!pdd->dev->kfd->shared_resources.enable_mes) { r = debug_lock_and_unmap(pdd->dev->dqm); if (r) { @@ -519,10 +523,15 @@ int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags) int i, r = 0, rewind_count = 0; for (i = 0; i < target->n_pdds; i++) { + uint32_t caps; + uint32_t caps2; struct kfd_topology_device 
*topo_dev = - kfd_topology_device_by_id(target->pdds[i]->dev->id); - uint32_t caps = topo_dev->node_props.capability; - uint32_t caps2 = topo_dev->node_props.capability2; + kfd_topology_device_by_id(target->pdds[i]->dev->id); + if (!topo_dev) + return -EINVAL; + + caps = topo_dev->node_props.capability; + caps2 = topo_dev->node_props.capability2; if (!(caps & HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED) && (*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) { @@ -575,9 +584,9 @@ int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags) continue; if (!pdd->dev->kfd->shared_resources.enable_mes) - debug_refresh_runlist(pdd->dev->dqm); + (void)debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd, true); + (void)kfd_dbg_set_mes_debug_mode(pdd, true); } } @@ -637,9 +646,10 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id); if (!pdd->dev->kfd->shared_resources.enable_mes) - debug_refresh_runlist(pdd->dev->dqm); + (void)debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); + (void)kfd_dbg_set_mes_debug_mode(pdd, + !kfd_dbg_has_cwsr_workaround(pdd->dev)); } kfd_dbg_set_workaround(target, false); @@ -1081,6 +1091,10 @@ int kfd_dbg_trap_device_snapshot(struct kfd_process *target, for (i = 0; i < tmp_num_devices; i++) { struct kfd_process_device *pdd = target->pdds[i]; struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id); + if (!topo_dev) { + r = -EINVAL; + break; + } device_info.gpu_id = pdd->dev->id; device_info.exception_status = pdd->exception_status; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 1ad312af8ff0..13416bff7763 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -331,6 +331,12 @@ static int kfd_event_page_set(struct kfd_process *p, 
void *kernel_address, if (p->signal_page) return -EBUSY; + if (size < KFD_SIGNAL_EVENT_LIMIT * 8) { + pr_err("Event page size %llu is too small, need at least %lu bytes\n", + size, (unsigned long)(KFD_SIGNAL_EVENT_LIMIT * 8)); + return -EINVAL; + } + page = kzalloc(sizeof(*page), GFP_KERNEL); if (!page) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 90ac3a30e81d..76483d91af98 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -70,7 +70,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static void set_priority(struct cik_mqd *m, struct queue_properties *q) { m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; - m->cp_hqd_queue_priority = q->priority; + /* m->cp_hqd_queue_priority = q->priority; */ } static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 97055f808d4a..0186b3de67c0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -70,7 +70,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q) { m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; - m->cp_hqd_queue_priority = q->priority; + /* m->cp_hqd_queue_priority = q->priority; */ } static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 7e5a7ab6d0c0..c9e397366782 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -96,7 +96,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static void set_priority(struct v11_compute_mqd *m, struct 
queue_properties *q) { m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; - m->cp_hqd_queue_priority = q->priority; + /* m->cp_hqd_queue_priority = q->priority; */ } static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c index a51f217329db..3bbc2648f51d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c @@ -77,7 +77,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static void set_priority(struct v12_compute_mqd *m, struct queue_properties *q) { m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; - m->cp_hqd_queue_priority = q->priority; + /* m->cp_hqd_queue_priority = q->priority; */ } static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c index d0776ba2cc99..0d6b601962eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c @@ -131,7 +131,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static void set_priority(struct v12_1_compute_mqd *m, struct queue_properties *q) { m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; - m->cp_hqd_queue_priority = q->priority; + /* m->cp_hqd_queue_priority = q->priority; */ } static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index dcf4bbfa641b..3622d8392cb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -106,11 +106,14 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static void set_priority(struct v9_mqd *m, struct queue_properties *q) { m->cp_hqd_pipe_priority = 
pipe_priority_map[q->priority]; - m->cp_hqd_queue_priority = q->priority; + /* m->cp_hqd_queue_priority = q->priority; */ } static bool mqd_on_vram(struct amdgpu_device *adev) { + if (adev->apu_prefer_gtt) + return false; + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): case IP_VERSION(9, 5, 0): diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 09483f0862d4..e63ef6442b35 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -73,7 +73,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static void set_priority(struct vi_mqd *m, struct queue_properties *q) { m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; - m->cp_hqd_queue_priority = q->priority; + /* m->cp_hqd_queue_priority = q->priority; */ } static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 219d08f092db..8dc6ca8e9062 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1767,9 +1767,6 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct kfd_node *dev; int ret; - if (!drm_file) - return -EINVAL; - if (pdd->drm_priv) return -EBUSY; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0b4fc654e76f..c3c045c8144f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1244,6 +1244,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) struct dmub_srv *dmub_srv = adev->dm.dmub_srv; struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; const struct firmware *dmub_fw = adev->dm.dmub_fw; + struct dc *dc = adev->dm.dc; struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; struct abm *abm = adev->dm.dc->res_pool->abm; struct 
dc_context *ctx = adev->dm.dc->ctx; @@ -1349,18 +1350,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) for (i = 0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; - switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { - case IP_VERSION(3, 1, 3): - case IP_VERSION(3, 1, 4): - case IP_VERSION(3, 5, 0): - case IP_VERSION(3, 5, 1): - case IP_VERSION(3, 6, 0): - case IP_VERSION(4, 0, 1): + /* Enable usb4 dpia in the FW APU */ + if (dc->caps.is_apu && + dc->res_pool->usb4_dpia_count != 0 && + !dc->debug.dpia_debug.bits.disable_dpia) { hw_params.dpia_supported = true; - hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; - break; - default: - break; + hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia; + hw_params.dpia_hpd_int_enable_supported = false; + hw_params.enable_non_transparent_setconfig = dc->config.consolidated_dpia_dp_lt; + hw_params.disable_dpia_bw_allocation = !dc->config.usb4_bw_alloc_support; } switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { @@ -3481,7 +3479,17 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { + if (!amdgpu_in_reset(adev)) + mutex_lock(&dm->dc_lock); + + /* Need to set POWER_STATE_D0 first or it will not execute + * idle_power_optimizations command to DMUB. 
+ */ + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); + + if (!amdgpu_in_reset(adev)) + mutex_unlock(&dm->dc_lock); } if (amdgpu_in_reset(adev)) { @@ -3606,6 +3614,11 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) if (aconnector->mst_root) continue; + /* Skip eDP detection, when there is no sink present */ + if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_EDP && + !aconnector->dc_link->edp_sink_present) + continue; + guard(mutex)(&aconnector->hpd_lock); if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); @@ -10662,10 +10675,10 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm, wb_info->dwb_params.capture_rate = dwb_capture_rate_0; - wb_info->dwb_params.scaler_taps.h_taps = 4; - wb_info->dwb_params.scaler_taps.v_taps = 4; - wb_info->dwb_params.scaler_taps.h_taps_c = 2; - wb_info->dwb_params.scaler_taps.v_taps_c = 2; + wb_info->dwb_params.scaler_taps.h_taps = 1; + wb_info->dwb_params.scaler_taps.v_taps = 1; + wb_info->dwb_params.scaler_taps.h_taps_c = 1; + wb_info->dwb_params.scaler_taps.v_taps_c = 1; wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; @@ -10965,7 +10978,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) continue; } for (j = 0; j < status->plane_count; j++) - dummy_updates[j].surface = status->plane_states[0]; + dummy_updates[j].surface = status->plane_states[j]; sort(dummy_updates, status->plane_count, sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); @@ -11681,6 +11694,8 @@ static bool should_reset_plane(struct drm_atomic_state *state, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state; struct amdgpu_device *adev = drm_to_adev(plane->dev); + struct 
drm_connector_state *new_con_state; + struct drm_connector *connector; int i; /* @@ -11691,6 +11706,15 @@ static bool should_reset_plane(struct drm_atomic_state *state, state->allow_modeset) return true; + /* Check for writeback commit */ + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + if (new_con_state->writeback_job) + return true; + } + if (amdgpu_in_reset(adev) && state->allow_modeset) return true; @@ -12289,10 +12313,9 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev, /* Overlay cursor not supported on HW before DCN * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions - * as previous DCN generations, so enable native mode on DCN401 in addition to DCE + * as previous DCN generations, so enable native mode on DCN401 */ - if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 || - amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { *cursor_mode = DM_CURSOR_NATIVE_MODE; return 0; } @@ -12612,6 +12635,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * need to be added for DC to not disable a plane by mistake */ if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) { + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0) { + drm_dbg(dev, "Overlay cursor not supported on DCE\n"); + ret = -EINVAL; + goto fail; + } + ret = drm_atomic_add_affected_planes(state, crtc); if (ret) goto fail; @@ -13129,6 +13158,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, u8 *edid_ext = NULL; int i; int j = 0; + int total_ext_block_len; if (edid == NULL || edid->extensions == 0) return -ENODEV; @@ -13140,7 +13170,8 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, break; } - while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) { + total_ext_block_len = EDID_LENGTH * edid->extensions; + while (j < 
total_ext_block_len - sizeof(struct amd_vsdb_block)) { struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 5851f2d55dde..1b03f2bf8d7a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -506,6 +506,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, struct amdgpu_dm_connector *aconnector = NULL; bool enable = amdgpu_dm_is_valid_crc_source(source); int ret = 0; + enum crc_poly_mode crc_poly_mode = CRC_POLY_MODE_16; /* Configuration will be deferred to stream enable. */ if (!stream_state) @@ -528,10 +529,18 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, amdgpu_dm_replay_disable(stream_state); } + /* CRC polynomial selection only support for DCN3.6+ except DCN4.0.1 */ + if ((amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 6, 0)) && + (amdgpu_ip_version(adev, DCE_HWIP, 0) != IP_VERSION(4, 0, 1))) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + crc_poly_mode = acrtc->dm_irq_params.crc_poly_mode; + } + /* Enable or disable CRTC CRC generation */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { if (!dc_stream_configure_crc(stream_state->ctx->dc, - stream_state, NULL, enable, enable, 0, true)) { + stream_state, NULL, enable, enable, 0, true, crc_poly_mode)) { ret = -EINVAL; goto unlock; } @@ -877,7 +886,7 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE) /* update ROI via dm*/ dc_stream_configure_crc(stream_state->ctx->dc, stream_state, - &crc_window, true, true, i, false); + &crc_window, true, true, i, false, (enum crc_poly_mode)acrtc->dm_irq_params.crc_poly_mode); 
reset_crc_frame_count[i] = true; @@ -901,7 +910,7 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE) /* Avoid ROI window get changed, keep overwriting. */ dc_stream_configure_crc(stream_state->ctx->dc, stream_state, - &crc_window, true, true, i, false); + &crc_window, true, true, i, false, (enum crc_poly_mode)acrtc->dm_irq_params.crc_poly_mode); /* crc ready for psp to read out */ crtc_ctx->crc_info.crc[i].crc_ready = true; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index b9ed29ec60dc..d6d43f1bf6d2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -46,6 +46,7 @@ #include "amdgpu_dm_psr.h" #endif +#define MULTIPLIER_TO_LR 270000 struct dmub_debugfs_trace_header { uint32_t entry_count; uint32_t reserved[3]; @@ -302,8 +303,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, switch (param[1]) { case LINK_RATE_LOW: + case LINK_RATE_RATE_2: + case LINK_RATE_RATE_3: case LINK_RATE_HIGH: case LINK_RATE_RBR2: + case LINK_RATE_RATE_6: case LINK_RATE_HIGH2: case LINK_RATE_HIGH3: case LINK_RATE_UHBR10: @@ -3504,6 +3508,10 @@ static ssize_t edp_ilr_write(struct file *f, const char __user *buf, uint8_t param_nums = 0; long param[2]; bool valid_input = true; + uint8_t supported_link_rates[16] = {0}; + uint32_t entry = 0; + uint32_t link_rate_in_khz = 0; + uint8_t dpcd_rev = 0; if (size == 0) return -EINVAL; @@ -3548,6 +3556,20 @@ static ssize_t edp_ilr_write(struct file *f, const char __user *buf, return size; } + if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates))) + return -EINVAL; + + dpcd_rev = link->dpcd_caps.dpcd_rev.raw; + if (dpcd_rev < DP_DPCD_REV_13 || + (supported_link_rates[entry + 1] == 0 && 
supported_link_rates[entry] == 0)) { + return size; + } + + entry = param[1] * 2; + link_rate_in_khz = (supported_link_rates[entry + 1] * 0x100 + + supported_link_rates[entry]) * 200; + /* save user force lane_count, link_rate to preferred settings * spread spectrum will not be changed */ @@ -3555,7 +3577,7 @@ static ssize_t edp_ilr_write(struct file *f, const char __user *buf, prefer_link_settings.lane_count = param[0]; prefer_link_settings.use_link_rate_set = true; prefer_link_settings.link_rate_set = param[1]; - prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]]; + prefer_link_settings.link_rate = link_rate_in_khz / MULTIPLIER_TO_LR; mutex_lock(&adev->dm.dc_lock); dc_link_set_preferred_training_settings(dc, &prefer_link_settings, @@ -3817,6 +3839,50 @@ static int crc_win_update_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get, crc_win_update_set, "%llu\n"); + +/* + * Trigger to set crc polynomial mode + * 0: 16-bit CRC, 1: 32-bit CRC + * only accepts 0 or 1 for supported hwip versions + */ +static int crc_poly_mode_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct amdgpu_crtc *acrtc; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + + if ((amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 6, 0)) && + (amdgpu_ip_version(adev, DCE_HWIP, 0) != IP_VERSION(4, 0, 1)) && + (val < 2)) { + acrtc = to_amdgpu_crtc(crtc); + mutex_lock(&adev->dm.dc_lock); + spin_lock_irq(&adev_to_drm(adev)->event_lock); + acrtc->dm_irq_params.crc_poly_mode = val; + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + mutex_unlock(&adev->dm.dc_lock); + } + + return 0; +} + +/* + * Get crc polynomial mode (0: 16-bit CRC, 1: 32-bit CRC) + */ +static int crc_poly_mode_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = 
acrtc->dm_irq_params.crc_poly_mode; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_poly_mode_fops, crc_poly_mode_get, + crc_poly_mode_set, "%llu\n"); #endif void crtc_debugfs_init(struct drm_crtc *crtc) { @@ -3836,6 +3902,8 @@ void crtc_debugfs_init(struct drm_crtc *crtc) &crc_win_y_end_fops); debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, &crc_win_update_fops); + debugfs_create_file_unsafe("crc_poly_mode", 0644, dir, crtc, + &crc_poly_mode_fops); dput(dir); #endif debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 1f41d6540b83..bf2a356b3475 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -1153,11 +1153,19 @@ void dm_helpers_init_panel_settings( void dm_helpers_override_panel_settings( struct dc_context *ctx, - struct dc_panel_config *panel_config) + struct dc_link *link) { + unsigned int panel_inst = 0; + // Feature DSC if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) - panel_config->dsc.disable_dsc_edp = true; + link->panel_config.dsc.disable_dsc_edp = true; + + if (dc_get_edp_link_panel_inst(ctx->dc, link, &panel_inst) && panel_inst == 1) { + link->panel_config.psr.disable_psr = true; + link->panel_config.psr.disallow_psrsu = true;; + link->panel_config.psr.disallow_replay = true; + } } void *dm_helpers_allocate_gpu_mem( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index e7b0928bd3db..5948e2a6219e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -919,16 +919,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) continue; amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + dc_link = 
amdgpu_dm_connector->dc_link; + if (!dc_link) + continue; /* * Analog connectors may be hot-plugged unlike other connector * types that don't support HPD. Only poll analog connectors. */ - use_polling |= - amdgpu_dm_connector->dc_link && - dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id); - - dc_link = amdgpu_dm_connector->dc_link; + use_polling |= dc_connector_supports_analog(dc_link->link_id.id); /* * Get a base driver irq reference for hpd ints for the lifetime diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h index 3c9995275cbd..f0c1b0c1faa9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -39,6 +39,7 @@ struct dm_irq_params { #ifdef CONFIG_DEBUG_FS enum amdgpu_dm_pipe_crc_source crc_src; + int crc_poly_mode; /* enum crc_poly_mode from timing_generator.h */ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY struct crc_window_param window_param[MAX_CRC_WINDOW_NUM]; /* At least one CRC window is activated or not*/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index d3e62f511c8f..198064acf9f6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -1060,10 +1060,15 @@ static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev, *min_downscale = plane_cap->max_downscale_factor.nv12; break; + /* All 64 bpp formats have the same fp16 scaling limits */ case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616: + case DRM_FORMAT_ARGB16161616: + case DRM_FORMAT_XBGR16161616: + case DRM_FORMAT_ABGR16161616: *max_upscale = plane_cap->max_upscale_factor.fp16; *min_downscale = plane_cap->max_downscale_factor.fp16; 
break; @@ -1650,7 +1655,7 @@ dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, MAX_COLOR_3DLUT_SIZE); } - if (dpp_color_caps.ogam_ram) { + if (dpp_color_caps.ogam_ram || dm->dc->caps.color.mpc.preblend) { drm_object_attach_property(&plane->base, mode_info.plane_blend_lut_property, 0); drm_object_attach_property(&plane->base, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index fd491b7a3cd7..99d6d6c93561 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -80,12 +80,20 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link) link->psr_settings.psr_feature_enabled = false; } else { + unsigned int panel_inst = 0; + if (link_supports_psrsu(link)) link->psr_settings.psr_version = DC_PSR_VERSION_SU_1; else link->psr_settings.psr_version = DC_PSR_VERSION_1; link->psr_settings.psr_feature_enabled = true; + + /*disable allow psr/psrsu/replay on eDP1*/ + if (dc_get_edp_link_panel_inst(link->ctx->dc, link, &panel_inst) && panel_inst == 1) { + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + link->psr_settings.psr_feature_enabled = false; + } } } diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 7277ed21552f..93d02956c5eb 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -22,7 +22,7 @@ # # Makefile for Display Core (dc) component. 
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp hubbub dccg hubp dio dwb hpo mmhubbub mpc opp pg +DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link dsc resource optc dpp hubbub dccg hubp dio dwb hpo mmhubbub mpc opp pg ifdef CONFIG_DRM_AMD_DC_FP diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 76a3559f0ddc..b692fa37402d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -1874,8 +1874,7 @@ static void dac_encoder_control_prepare_params( uint8_t dac_standard) { params->ucDacStandard = dac_standard; - if (action == ENCODER_CONTROL_SETUP || - action == ENCODER_CONTROL_INIT) + if (action == ENCODER_CONTROL_INIT) params->ucAction = ATOM_ENCODER_INIT; else if (action == ENCODER_CONTROL_ENABLE) params->ucAction = ATOM_ENABLE; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 72558cc55a9a..6fc524752613 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -186,7 +186,7 @@ static int dcn35_get_active_display_cnt_wa( return display_count; } -static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, +void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; @@ -766,32 +766,32 @@ static struct wm_table ddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 28.0, - .sr_enter_plus_exit_time_us = 30.0, + .sr_exit_time_us = 31.0, + .sr_enter_plus_exit_time_us = 33.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 28.0, - .sr_enter_plus_exit_time_us = 
30.0, + .sr_exit_time_us = 31.0, + .sr_enter_plus_exit_time_us = 33.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 28.0, - .sr_enter_plus_exit_time_us = 30.0, + .sr_exit_time_us = 31.0, + .sr_enter_plus_exit_time_us = 33.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 28.0, - .sr_enter_plus_exit_time_us = 30.0, + .sr_exit_time_us = 31.0, + .sr_enter_plus_exit_time_us = 33.0, .valid = true, }, } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h index a12a9bf90806..83e2263563fe 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h @@ -64,4 +64,10 @@ void dcn351_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_dcn35 *clk_mgr, struct pp_smu_funcs *pp_smu, struct dccg *dccg); + +void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, + struct dc_state *context, + bool safe_to_lower, + bool disable); + #endif //__DCN35_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index cb85b7ac2697..984b4bc5f53c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -53,7 +53,7 @@ #include "dpp.h" #include "timing_generator.h" #include "abm.h" -#include "virtual/virtual_link_encoder.h" +#include "dio/virtual/virtual_link_encoder.h" #include "hubp.h" #include "link_hwss.h" @@ -701,6 +701,7 @@ dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream, * once. * @idx: Capture CRC on which CRC engine instance * @reset: Reset CRC engine before the configuration + * @crc_poly_mode: CRC polynomial mode * * By default, the entire frame is used to calculate the CRC. 
* @@ -709,7 +710,7 @@ dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream, */ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, struct crc_params *crc_window, bool enable, bool continuous, - uint8_t idx, bool reset) + uint8_t idx, bool reset, enum crc_poly_mode crc_poly_mode) { struct pipe_ctx *pipe; struct crc_params param; @@ -733,6 +734,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, param.windowb_y_start = 0; param.windowb_x_end = pipe->stream->timing.h_addressable; param.windowb_y_end = pipe->stream->timing.v_addressable; + param.crc_poly_mode = crc_poly_mode; if (crc_window) { param.windowa_x_start = crc_window->windowa_x_start; @@ -3366,6 +3368,10 @@ static void copy_stream_update_to_stream(struct dc *dc, stream->scaler_sharpener_update = *update->scaler_sharpener_update; if (update->sharpening_required) stream->sharpening_required = *update->sharpening_required; + + if (update->drr_trigger_mode) { + stream->drr_trigger_mode = *update->drr_trigger_mode; + } } static void backup_planes_and_stream_state( @@ -3860,7 +3866,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) return; - if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) + if (!dc->config.frame_update_cmd_version2 && !dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) return; memset(&cmd, 0x0, sizeof(cmd)); @@ -3880,7 +3886,11 @@ void dc_dmub_update_dirty_rect(struct dc *dc, if (srf_updates[i].surface->flip_immediate) continue; - update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; + if (dc->config.frame_update_cmd_version2) + update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2; + else + update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1; + update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, sizeof(flip_addr->dirty_rects)); @@ 
-3894,6 +3904,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, update_dirty_rect->panel_inst = panel_inst; update_dirty_rect->pipe_idx = j; + update_dirty_rect->otg_inst = pipe_ctx->stream_res.tg->inst; dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } } @@ -3916,7 +3927,7 @@ static void build_dmub_update_dirty_rect( if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) return; - if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) + if (!dc->config.frame_update_cmd_version2 && !dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) return; memset(&cmd, 0x0, sizeof(cmd)); @@ -3935,7 +3946,12 @@ static void build_dmub_update_dirty_rect( /* Do not send in immediate flip mode */ if (srf_updates[i].surface->flip_immediate) continue; - update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; + + if (dc->config.frame_update_cmd_version2) + update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2; + else + update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1; + update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, sizeof(flip_addr->dirty_rects)); @@ -3948,6 +3964,7 @@ static void build_dmub_update_dirty_rect( continue; update_dirty_rect->panel_inst = panel_inst; update_dirty_rect->pipe_idx = j; + update_dirty_rect->otg_inst = pipe_ctx->stream_res.tg->inst; dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; (*dmub_cmd_count)++; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 848c267ef11e..03d125f794b0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -37,7 +37,7 @@ #include "dpp.h" #include "core_types.h" #include "set_mode_types.h" -#include "virtual/virtual_stream_encoder.h" +#include 
"dio/virtual/virtual_stream_encoder.h" #include "dpcd_defs.h" #include "link_enc_cfg.h" #include "link_service.h" @@ -45,7 +45,7 @@ #include "dc_state_priv.h" #include "dc_stream_priv.h" -#include "virtual/virtual_link_hwss.h" +#include "link/hwss/link_hwss_virtual.h" #include "link/hwss/link_hwss_dio.h" #include "link/hwss/link_hwss_dpia.h" #include "link/hwss/link_hwss_hpo_dp.h" diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index f59020f1a722..191f6435e7c6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -515,16 +515,18 @@ bool dc_stream_program_cursor_position( } } - /* apply manual trigger */ - int i; + if (stream->drr_trigger_mode == DRR_TRIGGER_ON_FLIP_AND_CURSOR) { + /* apply manual trigger */ + int i; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; - /* trigger event on first pipe with current stream */ - if (stream == pipe_ctx->stream) { - pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); - break; + /* trigger event on first pipe with current stream */ + if (stream == pipe_ctx->stream) { + pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); + break; + } } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index ab19b6230945..4c4239cac863 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -63,7 +63,7 @@ struct dcn_dsc_reg_state; struct dcn_optc_reg_state; struct dcn_dccg_reg_state; -#define DC_VER "3.2.367" +#define DC_VER "3.2.369" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC @@ -505,7 +505,6 @@ union allow_lttpr_non_transparent_mode { } 
bits; unsigned char raw; }; - /* Structure to hold configuration flags set by dm at dc creation. */ struct dc_config { bool gpu_vm_support; @@ -560,6 +559,7 @@ struct dc_config { bool enable_dpia_pre_training; bool unify_link_enc_assignment; bool enable_cursor_offload; + bool frame_update_cmd_version2; struct spl_sharpness_range dcn_sharpness_range; struct spl_sharpness_range dcn_override_sharpness_range; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index dc1b3f6c22c9..e4dd5ca70987 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1034,12 +1034,19 @@ static void dc_build_cursor_update_payload0( struct pipe_ctx *pipe_ctx, uint8_t p_idx, struct dmub_cmd_update_cursor_payload0 *payload) { + struct dc *dc = pipe_ctx->stream->ctx->dc; struct hubp *hubp = pipe_ctx->plane_res.hubp; unsigned int panel_inst = 0; - if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, - pipe_ctx->stream->link, &panel_inst)) - return; + if (dc->config.frame_update_cmd_version2 == true) { + /* Don't need panel_inst for command version2 */ + payload->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2; + } else { + if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, + pipe_ctx->stream->link, &panel_inst)) + return; + payload->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1; + } /* Payload: Cursor Rect is built from position & attribute * x & y are obtained from postion @@ -1052,8 +1059,8 @@ static void dc_build_cursor_update_payload0( payload->enable = hubp->pos.cur_ctl.bits.cur_enable; payload->pipe_idx = p_idx; - payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; payload->panel_inst = panel_inst; + payload->otg_inst = pipe_ctx->stream_res.tg->inst; } static void dc_build_cursor_position_update_payload0( diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 719b98d8e8ca..86394203cee7 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -183,6 +183,11 @@ struct luminance_data { int dm_max_decrease_from_nominal; }; +enum dc_drr_trigger_mode { + DRR_TRIGGER_ON_FLIP = 0, + DRR_TRIGGER_ON_FLIP_AND_CURSOR, +}; + struct dc_stream_state { // sink is deprecated, new code should not reference // this pointer @@ -316,6 +321,8 @@ struct dc_stream_state { bool scaler_sharpener_update; bool sharpening_required; + enum dc_drr_trigger_mode drr_trigger_mode; + struct dc_update_scratch_space *update_scratch; }; @@ -366,6 +373,8 @@ struct dc_stream_update { bool *hw_cursor_req; bool *scaler_sharpener_update; bool *sharpening_required; + + enum dc_drr_trigger_mode *drr_trigger_mode; }; bool dc_is_stream_unchanged( @@ -584,7 +593,8 @@ bool dc_stream_configure_crc(struct dc *dc, bool enable, bool continuous, uint8_t idx, - bool reset); + bool reset, + enum crc_poly_mode crc_poly_mode); bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 0e953059ff6d..bddb16bb76d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -1230,7 +1230,7 @@ struct replay_settings { uint32_t replay_desync_error_fail_count; /* The frame skip number dal send to DMUB */ uint16_t frame_skip_number; - /* Current Panel Replay event */ + /* Current Panel Replay events */ uint32_t replay_events; }; @@ -1256,7 +1256,7 @@ struct dc_panel_config { unsigned int max_nonboost_brightness_millinits; unsigned int min_brightness_millinits; } nits_brightness; - /* PSR */ + /* PSR/Replay */ struct psr { bool disable_psr; bool disallow_psrsu; @@ -1266,6 +1266,8 @@ struct dc_panel_config { bool rc_allow_fullscreen_VPB; bool read_psrcap_again; unsigned int replay_enable_option; + bool enable_frame_skipping; + bool enable_teams_optimization; } psr; /* ABM */ struct varib { @@ -1282,6 +1284,27 @@ 
struct dc_panel_config { struct ilr { bool optimize_edp_link_rate; /* eDP ILR */ } ilr; + /* Adaptive VariBright*/ + struct adaptive_vb { + bool disable_adaptive_vb; + unsigned int default_abm_vb_levels; // default value = 0xDCAA6414 + unsigned int default_cacp_vb_levels; + unsigned int default_abm_vb_hdr_levels; // default value = 0xB4805A40 + unsigned int default_cacp_vb_hdr_levels; + unsigned int abm_scaling_factors; // default value = 0x23210012 + unsigned int cacp_scaling_factors; + unsigned int battery_life_configures; // default value = 0x0A141E + unsigned int abm_backlight_adaptive_pwl_1; // default value = 0x6A4F7244 + unsigned int abm_backlight_adaptive_pwl_2; // default value = 0x4C615659 + unsigned int abm_backlight_adaptive_pwl_3; // default value = 0x0064 + unsigned int cacp_backlight_adaptive_pwl_1; + unsigned int cacp_backlight_adaptive_pwl_2; + unsigned int cacp_backlight_adaptive_pwl_3; + } adaptive_vb; + /* Ramless Idle Opt*/ + struct rio { + bool disable_rio; + } rio; }; #define MAX_SINKS_PER_LINK 4 diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c index 33d8bd91cb01..733b85d450d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c @@ -131,6 +131,54 @@ void dccg2_otg_drop_pixel(struct dccg *dccg, void dccg2_init(struct dccg *dccg) { + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* Hardcoded register values for DCN20 + * These are specific to 100Mhz refclk + * Different ASICs with different refclk may override this in their own init + */ + REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x00120264); + REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x001186a0); + REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x0e01003c); + + if (REG(REFCLK_CNTL)) + REG_WRITE(REFCLK_CNTL, 0); +} + +void dccg2_refclk_setup(struct dccg *dccg) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* REFCLK programming that must occur after 
hubbub initialization */ + if (REG(REFCLK_CNTL)) + REG_WRITE(REFCLK_CNTL, 0); +} + +bool dccg2_is_s0i3_golden_init_wa_done(struct dccg *dccg) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + return REG_READ(MICROSECOND_TIME_BASE_DIV) == 0x00120464; +} + +void dccg2_allow_clock_gating(struct dccg *dccg, bool allow) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (allow) { + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + } else { + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0xFFFFFFFF); + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0xFFFFFFFF); + } +} + +void dccg2_enable_memory_low_power(struct dccg *dccg, bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, enable ? 0 : 1); } static const struct dccg_funcs dccg2_funcs = { @@ -139,7 +187,11 @@ static const struct dccg_funcs dccg2_funcs = { .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, .otg_add_pixel = dccg2_otg_add_pixel, .otg_drop_pixel = dccg2_otg_drop_pixel, - .dccg_init = dccg2_init + .dccg_init = dccg2_init, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; struct dccg *dccg2_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h index 8bdffd9ff31b..3711d400773a 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h @@ -37,7 +37,8 @@ SR(REFCLK_CNTL),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\ - SR(DISPCLK_FREQ_CHANGE_CNTL) + SR(DISPCLK_FREQ_CHANGE_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL) #define DCCG_REG_LIST_DCN2() \ 
DCCG_COMMON_REG_LIST_DCN_BASE(),\ @@ -81,7 +82,8 @@ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\ - DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh) + DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh),\ + DCCG_SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh) @@ -130,7 +132,8 @@ type DISPCLK_CHG_FWD_CORR_DISABLE;\ type DISPCLK_FREQ_CHANGE_CNTL;\ type OTG_ADD_PIXEL[MAX_PIPES];\ - type OTG_DROP_PIXEL[MAX_PIPES]; + type OTG_DROP_PIXEL[MAX_PIPES];\ + type DC_MEM_GLOBAL_PWR_REQ_DIS; #define DCCG3_REG_FIELD_LIST(type) \ type HDMICHARCLK0_EN;\ @@ -515,6 +518,11 @@ void dccg2_otg_drop_pixel(struct dccg *dccg, void dccg2_init(struct dccg *dccg); +void dccg2_refclk_setup(struct dccg *dccg); +void dccg2_allow_clock_gating(struct dccg *dccg, bool allow); +void dccg2_enable_memory_low_power(struct dccg *dccg, bool enable); +bool dccg2_is_s0i3_golden_init_wa_done(struct dccg *dccg); + struct dccg *dccg2_create( struct dc_context *ctx, const struct dccg_registers *regs, diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.c index 9a3402148fde..79d14ce19393 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.c @@ -24,6 +24,7 @@ */ #include "dcn201_dccg.h" +#include "dcn20/dcn20_dccg.h" #include "reg_helper.h" #include "core_types.h" @@ -56,7 +57,11 @@ static const struct dccg_funcs dccg201_funcs = { .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, .otg_add_pixel = dccg2_otg_add_pixel, .otg_drop_pixel = dccg2_otg_drop_pixel, - .dccg_init = dccg2_init + .dccg_init = dccg2_init, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = 
dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; struct dccg *dccg201_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c index d07c04458d31..b48dcafbae66 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c @@ -103,7 +103,11 @@ static const struct dccg_funcs dccg21_funcs = { .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, .otg_add_pixel = dccg2_otg_add_pixel, .otg_drop_pixel = dccg2_otg_drop_pixel, - .dccg_init = dccg2_init + .dccg_init = dccg2_init, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; struct dccg *dccg21_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.c index d445dfefc047..adec7c3c2d49 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.c @@ -49,7 +49,11 @@ static const struct dccg_funcs dccg3_funcs = { .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, .otg_add_pixel = dccg2_otg_add_pixel, .otg_drop_pixel = dccg2_otg_drop_pixel, - .dccg_init = dccg2_init + .dccg_init = dccg2_init, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; struct dccg *dccg3_create( diff --git 
a/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.c index 97e9be87afd9..fc9bddd94b50 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.c @@ -48,7 +48,11 @@ static const struct dccg_funcs dccg301_funcs = { .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, .otg_add_pixel = dccg2_otg_add_pixel, .otg_drop_pixel = dccg2_otg_drop_pixel, - .dccg_init = dccg2_init + .dccg_init = dccg2_init, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; struct dccg *dccg301_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c index 97df04b7e39d..c647dff5234a 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c @@ -26,6 +26,7 @@ #include "reg_helper.h" #include "core_types.h" #include "dcn31_dccg.h" +#include "dcn20/dcn20_dccg.h" #include "dal_asic_id.h" #define TO_DCN_DCCG(dccg)\ @@ -850,6 +851,10 @@ static const struct dccg_funcs dccg31_funcs = { .disable_dsc = dccg31_disable_dscclk, .enable_dsc = dccg31_enable_dscclk, .dccg_read_reg_state = dccg31_read_reg_state, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; struct dccg *dccg31_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c 
b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c index ef3db6beba25..2e9c4b13988a 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c @@ -29,6 +29,7 @@ #include "dcn31/dcn31_dccg.h" #include "dcn314_dccg.h" +#include "dcn20/dcn20_dccg.h" #define TO_DCN_DCCG(dccg)\ container_of(dccg, struct dcn_dccg, base) @@ -378,7 +379,11 @@ static const struct dccg_funcs dccg314_funcs = { .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, .set_dtbclk_p_src = dccg314_set_dtbclk_p_src, - .dccg_read_reg_state = dccg31_read_reg_state + .dccg_read_reg_state = dccg31_read_reg_state, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; struct dccg *dccg314_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c index 21a6ca5ca192..ce697c3249fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c @@ -26,6 +26,7 @@ #include "reg_helper.h" #include "core_types.h" #include "dcn32_dccg.h" +#include "dcn20/dcn20_dccg.h" #define TO_DCN_DCCG(dccg)\ container_of(dccg, struct dcn_dccg, base) @@ -347,6 +348,10 @@ static const struct dccg_funcs dccg32_funcs = { .get_pixel_rate_div = dccg32_get_pixel_rate_div, .trigger_dio_fifo_resync = dccg32_trigger_dio_fifo_resync, .set_dtbclk_p_src = dccg32_set_dtbclk_p_src, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + 
.is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; struct dccg *dccg32_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c index bd2f528137b2..943ec1983076 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c @@ -26,6 +26,7 @@ #include "core_types.h" #include "resource.h" #include "dcn35_dccg.h" +#include "dcn20/dcn20_dccg.h" #define TO_DCN_DCCG(dccg)\ container_of(dccg, struct dcn_dccg, base) @@ -1105,7 +1106,7 @@ static void dccg35_enable_dpstreamclk_new(struct dccg *dccg, dccg35_set_dpstreamclk_src_new(dccg, src, inst); } -static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) +void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); uint32_t dispclk_rdivider_value = 0; @@ -1114,6 +1115,7 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) if (dispclk_rdivider_value != 0) REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } + static void dccg35_wait_for_dentist_change_done( struct dccg *dccg) { @@ -1151,8 +1153,7 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg, } -static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, - int req_dppclk) +void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -1498,11 +1499,7 @@ static void dccg35_set_dpstreamclk( __func__, dp_hpo_inst, (src == REFCLK) ? 
0 : 1, otg_inst); } - -static void dccg35_set_dpstreamclk_root_clock_gating( - struct dccg *dccg, - int dp_hpo_inst, - bool enable) +void dccg35_set_dpstreamclk_root_clock_gating(struct dccg *dccg, int dp_hpo_inst, bool enable) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -1669,10 +1666,7 @@ static void dccg35_set_valid_pixel_rate( dccg35_set_dtbclk_dto(dccg, &dto_params); } -static void dccg35_dpp_root_clock_control( - struct dccg *dccg, - unsigned int dpp_inst, - bool clock_on) +void dccg35_dpp_root_clock_control(struct dccg *dccg, unsigned int dpp_inst, bool clock_on) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -1704,9 +1698,7 @@ static void dccg35_dpp_root_clock_control( DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on); } -static void dccg35_disable_symclk32_se( - struct dccg *dccg, - int hpo_se_inst) +void dccg35_disable_symclk32_se(struct dccg *dccg, int hpo_se_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -1813,7 +1805,7 @@ void dccg35_enable_global_fgcg_rep(struct dccg *dccg, bool value) REG_UPDATE(DCCG_GLOBAL_FGCG_REP_CNTL, DCCG_GLOBAL_FGCG_REP_DIS, !value); } -static void dccg35_enable_dscclk(struct dccg *dccg, int inst) +void dccg35_enable_dscclk(struct dccg *dccg, int inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -1860,8 +1852,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) udelay(10); } -static void dccg35_disable_dscclk(struct dccg *dccg, - int inst) +void dccg35_disable_dscclk(struct dccg *dccg, int inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -1906,7 +1897,7 @@ static void dccg35_disable_dscclk(struct dccg *dccg, udelay(10); } -static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) +void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -2013,7 +2004,7 @@ static uint8_t 
dccg35_get_number_enabled_symclk_fe_connected_to_be(struct dccg * return num_enabled_symclk_fe; } -static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) +void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) { uint8_t num_enabled_symclk_fe = 0; struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -2421,6 +2412,10 @@ static const struct dccg_funcs dccg35_funcs_new = { .enable_symclk_se = dccg35_enable_symclk_se_cb, .disable_symclk_se = dccg35_disable_symclk_se_cb, .set_dtbclk_p_src = dccg35_set_dtbclk_p_src_cb, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ }; static const struct dccg_funcs dccg35_funcs = { @@ -2452,8 +2447,12 @@ static const struct dccg_funcs dccg35_funcs = { .enable_symclk_se = dccg35_enable_symclk_se, .disable_symclk_se = dccg35_disable_symclk_se, .set_dtbclk_p_src = dccg35_set_dtbclk_p_src, + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ + .allow_clock_gating = dccg2_allow_clock_gating, + .enable_memory_low_power = dccg2_enable_memory_low_power, + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done, /* Deprecated - for backward compatibility only */ .dccg_root_gate_disable_control = dccg35_root_gate_disable_control, - .dccg_read_reg_state = dccg31_read_reg_state, + .dccg_read_reg_state = dccg31_read_reg_state }; struct dccg *dccg35_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h index 7b9c36456cd9..554700287c1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h @@ -249,8 
+249,25 @@ struct dccg *dccg35_create( void dccg35_init(struct dccg *dccg); +void dccg35_trigger_dio_fifo_resync(struct dccg *dccg); + +void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); + void dccg35_enable_global_fgcg_rep(struct dccg *dccg, bool value); void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); +void dccg35_set_dpstreamclk_root_clock_gating(struct dccg *dccg, int dp_hpo_inst, bool enable); + +void dccg35_set_hdmistreamclk_root_clock_gating(struct dccg *dccg, bool enable); + +void dccg35_dpp_root_clock_control(struct dccg *dccg, unsigned int dpp_inst, bool clock_on); + +void dccg35_disable_symclk32_se(struct dccg *dccg, int hpo_se_inst); + +void dccg35_enable_dscclk(struct dccg *dccg, int inst); +void dccg35_disable_dscclk(struct dccg *dccg, int inst); + +void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); +void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); #endif //__DCN35_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 663a18ee5162..f1d394560892 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -27,6 +27,7 @@ #include "core_types.h" #include "dcn401_dccg.h" #include "dcn31/dcn31_dccg.h" +#include "dcn20/dcn20_dccg.h" /* #include "dmub_common.h" @@ -595,16 +596,6 @@ void dccg401_set_dp_dto( bool enable = false; - if (params->otg_inst > 3) { - /* dcn401 only has 4 instances */ - BREAK_TO_DEBUGGER(); - return; - } - if (!params->refclk_hz) { - BREAK_TO_DEBUGGER(); - return; - } - if (!dc_is_tmds_signal(params->signal)) { uint64_t dto_integer; uint64_t dto_phase_hz; @@ -612,6 +603,11 @@ void dccg401_set_dp_dto( enable = true; + if (!params->refclk_hz) { + BREAK_TO_DEBUGGER(); + return; + } + /* Set DTO 
values: * int = target_pix_rate / reference_clock * phase = target_pix_rate - int * reference_clock, @@ -866,6 +862,7 @@ static const struct dccg_funcs dccg401_funcs = { .update_dpp_dto = dccg401_update_dpp_dto, .get_dccg_ref_freq = dccg401_get_dccg_ref_freq, .dccg_init = dccg401_init, + .allow_clock_gating = dccg2_allow_clock_gating, .set_dpstreamclk = dccg401_set_dpstreamclk, .enable_symclk32_se = dccg31_enable_symclk32_se, .disable_symclk32_se = dccg31_disable_symclk32_se, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 7f0766b5fa3d..2ba3d3a3aac5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -102,6 +102,7 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = { .enable_dp_output = dce110_link_encoder_enable_dp_output, .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output, .enable_lvds_output = dce110_link_encoder_enable_lvds_output, + .enable_analog_output = dce110_link_encoder_enable_analog_output, .disable_output = dce110_link_encoder_disable_output, .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings, .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern, @@ -133,6 +134,21 @@ static enum bp_result link_transmitter_control( return result; } +static enum bp_result link_dac_encoder_control( + struct dce110_link_encoder *link_enc, + enum bp_encoder_control_action action, + uint32_t pix_clk_100hz) +{ + struct dc_bios *bios = link_enc->base.ctx->dc_bios; + struct bp_encoder_control encoder_control = {0}; + + encoder_control.action = action; + encoder_control.engine_id = link_enc->base.analog_engine; + encoder_control.pixel_clock = pix_clk_100hz / 10; + + return bios->funcs->encoder_control(bios, &encoder_control); +} + static void enable_phy_bypass_mode( struct dce110_link_encoder *enc110, bool enable) @@ -1021,6 +1037,16 @@ void dce110_link_encoder_hw_init( 
cntl.coherent = false; cntl.hpd_sel = enc110->base.hpd_source; + if (enc110->base.analog_engine != ENGINE_ID_UNKNOWN) { + result = link_dac_encoder_control(enc110, ENCODER_CONTROL_INIT, 0); + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table for DAC!\n", + __func__); + BREAK_TO_DEBUGGER(); + return; + } + } + /* The code below is only applicable to encoders with a digital transmitter. */ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN) return; @@ -1175,6 +1201,22 @@ void dce110_link_encoder_enable_lvds_output( } } +void dce110_link_encoder_enable_analog_output( + struct link_encoder *enc, + uint32_t pixel_clock) +{ + struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); + enum bp_result result; + + result = link_dac_encoder_control(enc110, ENCODER_CONTROL_ENABLE, pixel_clock); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} + /* enables DP PHY output */ void dce110_link_encoder_enable_dp_output( struct link_encoder *enc, @@ -1345,19 +1387,8 @@ void dce110_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; - switch (enc->analog_engine) { - case ENGINE_ID_DACA: - REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0); - break; - case ENGINE_ID_DACB: - /* DACB doesn't seem to be present on DCE6+, - * although there are references to it in the register file. - */ - DC_LOG_ERROR("%s DACB is unsupported\n", __func__); - break; - default: - break; - } + if (enc->analog_engine != ENGINE_ID_UNKNOWN) + link_dac_encoder_control(enc110, ENCODER_CONTROL_DISABLE, 0); /* The code below only applies to connectors that support digital signals. 
*/ if (enc->transmitter == TRANSMITTER_UNKNOWN) @@ -1804,6 +1835,7 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = { .enable_dp_output = dce60_link_encoder_enable_dp_output, .enable_dp_mst_output = dce60_link_encoder_enable_dp_mst_output, .enable_lvds_output = dce110_link_encoder_enable_lvds_output, + .enable_analog_output = dce110_link_encoder_enable_analog_output, .disable_output = dce110_link_encoder_disable_output, .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings, .dp_set_phy_pattern = dce60_link_encoder_dp_set_phy_pattern, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 9ba533aa6f88..8841000361fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -101,8 +101,7 @@ SRI(DP_SEC_CNTL, DP, id), \ SRI(DP_VID_STREAM_CNTL, DP, id), \ SRI(DP_DPHY_FAST_TRAINING, DP, id), \ - SRI(DP_SEC_CNTL1, DP, id), \ - SR(DAC_ENABLE) + SRI(DP_SEC_CNTL1, DP, id) #endif #define LE_DCE80_REG_LIST(id)\ @@ -268,6 +267,11 @@ void dce110_link_encoder_enable_lvds_output( enum clock_source_id clock_source, uint32_t pixel_clock); +/* enables analog output from the DAC */ +void dce110_link_encoder_enable_analog_output( + struct link_encoder *enc, + uint32_t pixel_clock); + /* disable PHY output */ void dce110_link_encoder_disable_output( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 574618d5d4a4..87c19f17c799 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -1498,7 +1498,10 @@ static void dig_connect_to_otg( { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); - REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); + if (enc->id == ENGINE_ID_DACA || enc->id == ENGINE_ID_DACB) + 
REG_UPDATE(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, tg_inst); + else + REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); } static unsigned int dig_source_otg( @@ -1507,7 +1510,10 @@ static unsigned int dig_source_otg( uint32_t tg_inst = 0; struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); - REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst); + if (enc->id == ENGINE_ID_DACA || enc->id == ENGINE_ID_DACB) + REG_GET(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, &tg_inst); + else + REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst); return tg_inst; } @@ -1568,16 +1574,25 @@ void dce110_stream_encoder_construct( enc110->se_mask = se_mask; } -static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {}; +static const struct stream_encoder_funcs dce110_an_str_enc_funcs = { + .dig_connect_to_otg = dig_connect_to_otg, + .dig_source_otg = dig_source_otg, +}; void dce110_analog_stream_encoder_construct( struct dce110_stream_encoder *enc110, struct dc_context *ctx, struct dc_bios *bp, - enum engine_id eng_id) + enum engine_id eng_id, + const struct dce110_stream_enc_registers *regs, + const struct dce_stream_encoder_shift *se_shift, + const struct dce_stream_encoder_mask *se_mask) { enc110->base.funcs = &dce110_an_str_enc_funcs; enc110->base.ctx = ctx; enc110->base.id = eng_id; enc110->base.bp = bp; + enc110->regs = regs; + enc110->se_shift = se_shift; + enc110->se_mask = se_mask; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h index 068de1392121..342c0afe6a94 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h @@ -65,6 +65,7 @@ SRI(AFMT_60958_1, DIG, id), \ SRI(AFMT_60958_2, DIG, id), \ SRI(DIG_FE_CNTL, DIG, id), \ + SR(DAC_SOURCE_SELECT), \ SRI(HDMI_CONTROL, DIG, id), \ SRI(HDMI_GC, DIG, id), \ SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ @@ -290,7 +291,8 @@ #define 
SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\ SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\ - SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh) + SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ + SE_SF(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, mask_sh) #define SE_COMMON_MASK_SH_LIST_DCE110(mask_sh)\ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\ @@ -494,6 +496,7 @@ struct dce_stream_encoder_shift { uint8_t DP_VID_N_MUL; uint8_t DP_VID_M_DOUBLE_VALUE_EN; uint8_t DIG_SOURCE_SELECT; + uint8_t DAC_SOURCE_SELECT; }; struct dce_stream_encoder_mask { @@ -626,6 +629,7 @@ struct dce_stream_encoder_mask { uint32_t DP_VID_N_MUL; uint32_t DP_VID_M_DOUBLE_VALUE_EN; uint32_t DIG_SOURCE_SELECT; + uint32_t DAC_SOURCE_SELECT; }; struct dce110_stream_enc_registers { @@ -653,6 +657,7 @@ struct dce110_stream_enc_registers { uint32_t AFMT_60958_1; uint32_t AFMT_60958_2; uint32_t DIG_FE_CNTL; + uint32_t DAC_SOURCE_SELECT; uint32_t DP_MSE_RATE_CNTL; uint32_t DP_MSE_RATE_UPDATE; uint32_t DP_PIXEL_FORMAT; @@ -712,7 +717,10 @@ void dce110_analog_stream_encoder_construct( struct dce110_stream_encoder *enc110, struct dc_context *ctx, struct dc_bios *bp, - enum engine_id eng_id); + enum engine_id eng_id, + const struct dce110_stream_enc_registers *regs, + const struct dce_stream_encoder_shift *se_shift, + const struct dce_stream_encoder_mask *se_mask); void dce110_se_audio_mute_control( struct stream_encoder *enc, bool mute); diff --git a/drivers/gpu/drm/amd/display/dc/dio/Makefile b/drivers/gpu/drm/amd/display/dc/dio/Makefile index 0dfd480976f7..2f5619078e1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dio/Makefile @@ -23,11 +23,20 @@ # # +############################################################################### +# VIRTUAL +############################################################################### +DIO_VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o + +AMD_DAL_DIO_VIRTUAL = $(addprefix 
$(AMDDALPATH)/dc/dio/virtual/,$(DIO_VIRTUAL)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DIO_VIRTUAL) + ifdef CONFIG_DRM_AMD_DC_FP ############################################################################### # DCN10 ############################################################################### -DIO_DCN10 = dcn10_link_encoder.o dcn10_stream_encoder.o +DIO_DCN10 = dcn10_link_encoder.o dcn10_stream_encoder.o dcn10_dio.o AMD_DAL_DIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/dio/dcn10/,$(DIO_DCN10)) diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.c new file mode 100644 index 000000000000..edcf4e67483b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. + +#include "dc_hw_types.h" +#include "dm_services.h" +#include "reg_helper.h" +#include "dcn10_dio.h" + +#define CTX \ + dio10->base.ctx +#define REG(reg)\ + dio10->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + dio10->shifts->field_name, dio10->masks->field_name + +static void dcn10_dio_mem_pwr_ctrl(struct dio *dio, bool enable_i2c_light_sleep) +{ + struct dcn10_dio *dio10 = TO_DCN10_DIO(dio); + + /* power AFMT HDMI memory */ + REG_WRITE(DIO_MEM_PWR_CTRL, 0); + + if (enable_i2c_light_sleep) + REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); +} + +static const struct dio_funcs dcn10_dio_funcs = { + .mem_pwr_ctrl = dcn10_dio_mem_pwr_ctrl, +}; + +void dcn10_dio_construct( + struct dcn10_dio *dio10, + struct dc_context *ctx, + const struct dcn_dio_registers *regs, + const struct dcn_dio_shift *shifts, + const struct dcn_dio_mask *masks) +{ + dio10->base.ctx = ctx; + dio10->base.funcs = &dcn10_dio_funcs; + + dio10->regs = regs; + dio10->shifts = shifts; + dio10->masks = masks; +} diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.h b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.h new file mode 100644 
index 000000000000..369c5996326e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. + +#ifndef __DCN10_DIO_H__ +#define __DCN10_DIO_H__ + +#include "dio.h" + +#define TO_DCN10_DIO(dio_base) \ + container_of(dio_base, struct dcn10_dio, base) + +#define DIO_REG_LIST_DCN10()\ + SR(DIO_MEM_PWR_CTRL) + +struct dcn_dio_registers { + uint32_t DIO_MEM_PWR_CTRL; +}; + +struct dcn_dio_shift { + uint8_t I2C_LIGHT_SLEEP_FORCE; +}; + +struct dcn_dio_mask { + uint32_t I2C_LIGHT_SLEEP_FORCE; +}; + +struct dcn10_dio { + struct dio base; + const struct dcn_dio_registers *regs; + const struct dcn_dio_shift *shifts; + const struct dcn_dio_mask *masks; +}; + +void dcn10_dio_construct( + struct dcn10_dio *dio10, + struct dc_context *ctx, + const struct dcn_dio_registers *regs, + const struct dcn_dio_shift *shifts, + const struct dcn_dio_mask *masks); + +#endif /* __DCN10_DIO_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.c index 1d226e0519a5..2655bc194a35 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.c @@ -128,5 +128,3 @@ bool virtual_link_encoder_construct( return true; } - - diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.h index eb1a94fb8a9b..eb1a94fb8a9b 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.h diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c index ad088d70e189..c5d2e9404d94 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ 
b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c @@ -171,4 +171,3 @@ struct stream_encoder *virtual_stream_encoder_create( kfree(enc); return NULL; } - diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.h index bf3422c66976..bf3422c66976 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.h diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 7014b8c2c956..2818df555e62 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -209,7 +209,7 @@ void dm_helpers_init_panel_settings( struct dc_sink *sink); void dm_helpers_override_panel_settings( struct dc_context *ctx, - struct dc_panel_config *config); + struct dc_link *link); int dm_helper_dmub_aux_transfer_sync( struct dc_context *ctx, const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 817a370e80a7..8a177d5ae213 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -164,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 28.0, - .sr_enter_plus_exit_time_us = 30.0, + .sr_exit_time_us = 31.0, + .sr_enter_plus_exit_time_us = 33.0, .sr_exit_z8_time_us = 250.0, .sr_enter_plus_exit_z8_time_us = 350.0, .fclk_change_latency_us = 24.0, diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c index 89890c88fd66..4022f91193ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c @@ -655,7 +655,7 @@ __DML_DLL_EXPORT__ void 
dml_print_soc_bounding_box(const struct soc_bounding_box dml_print("DML: soc_bbox: refclk_mhz = %f\n", soc->refclk_mhz); dml_print("DML: soc_bbox: amclk_mhz = %f\n", soc->amclk_mhz); - dml_print("DML: soc_bbox: max_outstanding_reqs = %f\n", soc->max_outstanding_reqs); + dml_print("DML: soc_bbox: max_outstanding_reqs = %d\n", soc->max_outstanding_reqs); dml_print("DML: soc_bbox: pct_ideal_sdp_bw_after_urgent = %f\n", soc->pct_ideal_sdp_bw_after_urgent); dml_print("DML: soc_bbox: pct_ideal_fabric_bw_after_urgent = %f\n", soc->pct_ideal_fabric_bw_after_urgent); dml_print("DML: soc_bbox: pct_ideal_dram_bw_after_urgent_pixel_only = %f\n", soc->pct_ideal_dram_bw_after_urgent_pixel_only); diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h index 1fbc520c2540..c4cce870877a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h @@ -115,9 +115,12 @@ struct dml2_dram_params { unsigned int channel_width_bytes; unsigned int channel_count; unsigned int transactions_per_clock; + bool alt_clock_bw_conversion; }; +#define ENABLE_WCK struct dml2_soc_state_table { + struct dml2_clk_table wck_ratio; struct dml2_clk_table uclk; struct dml2_clk_table fclk; struct dml2_clk_table dcfclk; diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 01b87be24ce3..ca5ac3c0deb5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -7077,10 +7077,21 @@ static void calculate_excess_vactive_bandwidth_required( } } -static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params 
*dram_config) +static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config, const struct dml2_mcg_dram_bw_to_min_clk_table *dram_bw_table) { double bw_mbps = 0; - bw_mbps = ((double)uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0; + unsigned int i; + + if (!dram_config->alt_clock_bw_conversion) + bw_mbps = ((double)uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0; + else + for (i = 0; i < dram_bw_table->num_entries; i++) + if (dram_bw_table->entries[i].min_uclk_khz >= uclk_khz) { + bw_mbps = (double)dram_bw_table->entries[i].pre_derate_dram_bw_kbps / 1000.0; + break; + } + + DML_ASSERT(bw_mbps > 0); return bw_mbps; } @@ -7964,7 +7975,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dispclk / 1000; mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000; mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dppclk / 1000; - mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config); + mode_lib->ms.uclk_freq_mhz = (double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_uclk_khz / 1000.0; + if (!mode_lib->ms.uclk_freq_mhz) + mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config); mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000); mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 
1].pre_derate_dram_bw_kbps / 1000); mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params); @@ -10407,7 +10420,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0; mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0; - mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config); + mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config, &min_clk_table->dram_bw_table); mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0; mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0; s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000; @@ -10485,7 +10498,10 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK); DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index); DML_LOG_VERBOSE("DML::%s: min_clk_table min_fclk_khz = %ld\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz); - DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config)); + if (min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_uclk_khz) + DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_uclk_khz / 1000.0); + else + DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, 
dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config)); for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) { DML_LOG_VERBOSE("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]); DML_LOG_VERBOSE("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]); diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c index 22969a533a7b..9d7741fd0adb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c @@ -7,14 +7,24 @@ #include "dml_top_types.h" #include "lib_float_math.h" -static double dram_bw_kbps_to_uclk_khz(unsigned long long bandwidth_kbps, const struct dml2_dram_params *dram_config) +static double dram_bw_kbps_to_uclk_khz(unsigned long long bandwidth_kbps, const struct dml2_dram_params *dram_config, struct dml2_mcg_dram_bw_to_min_clk_table *dram_bw_table) { double uclk_khz = 0; - unsigned long uclk_mbytes_per_tick = 0; - uclk_mbytes_per_tick = dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock; + if (!dram_config->alt_clock_bw_conversion) { + unsigned long uclk_bytes_per_tick = 0; - uclk_khz = (double)bandwidth_kbps / uclk_mbytes_per_tick; + uclk_bytes_per_tick = dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock; + uclk_khz = (double)bandwidth_kbps / uclk_bytes_per_tick; + } else { + unsigned int i; + /* For lpddr5 bytes per tick changes with mpstate, use table to find uclk*/ + for (i = 0; i < dram_bw_table->num_entries; i++) + if (dram_bw_table->entries[i].pre_derate_dram_bw_kbps >= bandwidth_kbps) { + uclk_khz = dram_bw_table->entries[i].min_uclk_khz; + break; + } + } return uclk_khz; } @@ -34,7 
+44,7 @@ static void get_minimum_clocks_for_latency(struct dml2_dpmm_map_mode_to_soc_dpm_ *dcfclk = in_out->min_clk_table->dram_bw_table.entries[min_clock_index_for_latency].min_dcfclk_khz; *fclk = in_out->min_clk_table->dram_bw_table.entries[min_clock_index_for_latency].min_fclk_khz; *uclk = dram_bw_kbps_to_uclk_khz(in_out->min_clk_table->dram_bw_table.entries[min_clock_index_for_latency].pre_derate_dram_bw_kbps, - &in_out->soc_bb->clk_table.dram_config); + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); } static unsigned long dml_round_up(double a) @@ -53,14 +63,18 @@ static void calculate_system_active_minimums(struct dml2_dpmm_map_mode_to_soc_dp double min_uclk_latency, min_fclk_latency, min_dcfclk_latency; const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result; - min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); - min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100); + min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.average_bw_dram_kbps + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100)), + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); - min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); if (in_out->display_cfg->display_config.hostvm_enable) - min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel_and_vm / 100); + min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.urgent_bw_dram_kbps + / 
((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel_and_vm / 100)), + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); else - min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100); + min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.urgent_bw_dram_kbps + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100)), + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg; @@ -97,11 +111,13 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result; /* assumes DF throttling is enabled */ - min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); - min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100); + min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.average_bw_dram_kbps + / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100)), + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); - min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); - min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100); + min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long 
long)(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps + / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100)), + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg; @@ -128,11 +144,13 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency); /* assumes DF throttling is disabled */ - min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); - min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100); + min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.average_bw_dram_kbps + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100)), + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); - min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); - min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100); + min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100)), + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); min_uclk_bw = min_uclk_urgent > min_uclk_avg ? 
min_uclk_urgent : min_uclk_avg; @@ -167,8 +185,9 @@ static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_ double min_uclk_latency, min_fclk_latency, min_dcfclk_latency; const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result; - min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); - min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.dram_derate_percent_pixel / 100); + min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.average_bw_dram_kbps + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.dram_derate_percent_pixel / 100)), + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); min_fclk_avg = (double)mode_support_result->global.active.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes; min_fclk_avg = (double)min_fclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.fclk_derate_percent / 100); diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h index 1a6c0727cd2a..a6bd75f30d20 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h @@ -16,6 +16,7 @@ struct dram_bw_to_min_clk_table_entry { unsigned long long pre_derate_dram_bw_kbps; + unsigned long min_uclk_khz; unsigned long min_fclk_khz; unsigned long min_dcfclk_khz; }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index ef4a16117181..c7923531da83 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ 
b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -376,10 +376,10 @@ void dpp3_cnv_setup ( tbl_entry.color_space = input_color_space; - if (color_space >= COLOR_SPACE_YCBCR601) - select = INPUT_CSC_SELECT_ICSC; - else + if (dpp3_should_bypass_post_csc_for_colorspace(color_space)) select = INPUT_CSC_SELECT_BYPASS; + else + select = INPUT_CSC_SELECT_ICSC; dpp3_program_post_csc(dpp_base, color_space, select, &tbl_entry); @@ -1541,3 +1541,18 @@ bool dpp3_construct( return true; } +bool dpp3_should_bypass_post_csc_for_colorspace(enum dc_color_space dc_color_space) +{ + switch (dc_color_space) { + case COLOR_SPACE_UNKNOWN: + case COLOR_SPACE_SRGB: + case COLOR_SPACE_XR_RGB: + case COLOR_SPACE_SRGB_LIMITED: + case COLOR_SPACE_MSREF_SCRGB: + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + return true; + default: + return false; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index d4a70b4379ea..6a61b99d6a79 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -644,4 +644,8 @@ void dpp3_program_cm_dealpha( void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, struct dpp_grph_csc_adjustment *adjust); + +bool dpp3_should_bypass_post_csc_for_colorspace( + enum dc_color_space dc_color_space); + #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 96c2c853de42..2d6a646462e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -206,10 +206,10 @@ void dpp401_dpp_setup( tbl_entry.color_space = input_color_space; - if (color_space >= COLOR_SPACE_YCBCR601) - select = INPUT_CSC_SELECT_ICSC; - else + if (dpp3_should_bypass_post_csc_for_colorspace(color_space)) select = INPUT_CSC_SELECT_BYPASS; + else + select 
= INPUT_CSC_SELECT_ICSC; dpp3_program_post_csc(dpp_base, color_space, select, &tbl_entry); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index 81c83d5fe042..ad7ef83694ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -115,6 +115,7 @@ struct dsc_funcs { void (*dsc_disconnect)(struct display_stream_compressor *dsc); void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc); void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); + void (*set_fgcg)(struct display_stream_compressor *dsc, bool enable); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c index d11afd1ce72a..941dce439e97 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c @@ -41,7 +41,7 @@ #define FN(reg_name, field_name) \ hubbub2->shifts->field_name, hubbub2->masks->field_name -static void dcn401_init_crb(struct hubbub *hubbub) +void dcn401_init_crb(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); @@ -1110,7 +1110,7 @@ bool hubbub401_get_dcc_compression_cap(struct hubbub *hubbub, return true; } -static void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg) +void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); @@ -1147,7 +1147,7 @@ static void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, un } } -static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase) +void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase) { struct dcn20_hubbub *hubbub2 = 
TO_DCN20_HUBBUB(hubbub); @@ -1170,7 +1170,7 @@ static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned comp } } -static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) +void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); @@ -1192,7 +1192,7 @@ static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) } } -static bool dcn401_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower) +bool dcn401_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h index b1d9ea9d1c3d..f48715544429 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h @@ -194,6 +194,11 @@ bool hubbub401_get_dcc_compression_cap( const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output); +bool dcn401_program_arbiter( + struct hubbub *hubbub, + struct dml2_display_arb_regs *arb_regs, + bool safe_to_lower); + void hubbub401_construct(struct dcn20_hubbub *hubbub2, struct dc_context *ctx, const struct dcn_hubbub_registers *hubbub_regs, @@ -203,4 +208,9 @@ void hubbub401_construct(struct dcn20_hubbub *hubbub2, int pixel_chunk_size_kb, int config_return_buffer_size_kb); +void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg); +void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); +void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst); +void dcn401_init_crb(struct hubbub *hubbub); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 4659e1b489ba..699a756bbc40 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -660,20 +660,6 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) } } -static void -dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct dc_link *link = pipe_ctx->stream->link; - struct dc_bios *bios = link->ctx->dc_bios; - struct bp_encoder_control encoder_control = {0}; - - encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE; - encoder_control.engine_id = link->link_enc->analog_engine; - encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10; - - bios->funcs->encoder_control(bios, &encoder_control); -} - void dce110_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = @@ -704,8 +690,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) tg->funcs->set_early_control(tg, early_control); - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) - dce110_dac_encoder_control(pipe_ctx, true); } static enum bp_result link_transmitter_control( @@ -1199,9 +1183,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } - - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) - dce110_dac_encoder_control(pipe_ctx, false); } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1584,25 +1565,6 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } -static void -dce110_select_crtc_source(struct pipe_ctx *pipe_ctx) -{ - struct dc_link *link = pipe_ctx->stream->link; - struct dc_bios *bios = link->ctx->dc_bios; - struct bp_crtc_source_select crtc_source_select = {0}; - enum engine_id engine_id = link->link_enc->preferred_engine; - - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) - engine_id = link->link_enc->analog_engine; 
- - crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst; - crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth; - crtc_source_select.engine_id = engine_id; - crtc_source_select.sink_signal = pipe_ctx->stream->signal; - - bios->funcs->select_crtc_source(bios, &crtc_source_select); -} - enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -1622,10 +1584,6 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( hws->funcs.disable_stream_gating(dc, pipe_ctx); } - if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) { - dce110_select_crtc_source(pipe_ctx); - } - if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output = {0}; @@ -1705,8 +1663,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.tg->funcs->set_static_screen_control( pipe_ctx->stream_res.tg, event_triggers, 2); - if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && - !dc_is_rgb_signal(pipe_ctx->stream->signal)) + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); @@ -1964,8 +1921,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) get_edp_streams(context, edp_streams, &edp_stream_num); - /* Check fastboot support, disable on DCE 6-8 because of blank screens */ - if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) { + /* Check fastboot support, disable on DCE 6-8-10 because of blank screens */ + if (edp_num && edp_stream_num && dc->ctx->dce_version > DCE_VERSION_10_0) { for (i = 0; i < edp_num; i++) { edp_link = edp_links[i]; if (edp_link != edp_streams[0]->link) @@ -3304,6 +3261,15 @@ void dce110_enable_tmds_link_output(struct dc_link *link, link->phy_state.symclk_state = SYMCLK_ON_TX_ON; } +static void dce110_enable_analog_link_output( + struct dc_link *link, + 
uint32_t pix_clk_100hz) +{ + link->link_enc->funcs->enable_analog_output( + link->link_enc, + pix_clk_100hz); +} + void dce110_enable_dp_link_output( struct dc_link *link, const struct link_resource *link_res, @@ -3441,6 +3407,7 @@ static const struct hw_sequencer_funcs dce110_funcs = { .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, + .enable_analog_link_output = dce110_enable_analog_link_output, .disable_link_output = dce110_disable_link_output, }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index c1586364ecd4..5243177c1faa 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -50,6 +50,7 @@ #include "link_hwss.h" #include "dpcd_defs.h" #include "dsc.h" +#include "dio/dcn10/dcn10_dio.h" #include "dce/dmub_psr.h" #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" @@ -1881,13 +1882,13 @@ void dcn10_init_hw(struct dc *dc) /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ if (!is_optimized_init_done) - REG_WRITE(DIO_MEM_PWR_CTRL, 0); + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index a76436dcbe40..307e8f8060e6 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -357,26 +357,10 @@ void dcn20_enable_power_gating_plane( void dcn20_dccg_init(struct dce_hwseq *hws) { - /* - * set MICROSECOND_TIME_BASE_DIV - * 100Mhz refclk -> 0x120264 - * 27Mhz refclk -> 0x12021b - * 48Mhz refclk -> 0x120230 - * - */ - REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264); + struct dc *dc = hws->ctx->dc; - /* - * set MILLISECOND_TIME_BASE_DIV - * 100Mhz refclk -> 0x1186a0 - * 27Mhz refclk -> 0x106978 - * 48Mhz refclk -> 0x10bb80 - * - */ - REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0); - - /* This value is dependent on the hardware pipeline delay so set once per SOC */ - REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c); + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->dccg_init) + dc->res_pool->dccg->funcs->dccg_init(dc->res_pool->dccg); } void dcn20_disable_vga( @@ -3155,8 +3139,12 @@ void dcn20_fpga_init_hw(struct dc *dc) REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); dcn10_hubbub_global_timer_enable(dc->res_pool->hubbub, true, 2); - if (REG(REFCLK_CNTL)) - REG_WRITE(REFCLK_CNTL, 0); + + if (hws->funcs.dccg_init) + hws->funcs.dccg_init(hws); + + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->refclk_setup) + dc->res_pool->dccg->funcs->refclk_setup(dc->res_pool->dccg); // diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c index 482053c4ad22..ce18d75fd991 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c @@ -40,6 +40,8 @@ #include "clk_mgr.h" #include "reg_helper.h" #include "dcn10/dcn10_hubbub.h" +#include "dio/dcn10/dcn10_dio.h" + #define CTX \ hws->ctx @@ -360,13 +362,13 @@ void dcn201_init_hw(struct dc *dc) } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ - 
REG_WRITE(DIO_MEM_PWR_CTRL, 0); + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index e2269211553c..062745389d9a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -33,6 +33,7 @@ #include "vmid.h" #include "reg_helper.h" #include "hw/clk_mgr.h" +#include "hw/dccg.h" #include "dc_dmub_srv.h" #include "abm.h" #include "link_service.h" @@ -87,12 +88,10 @@ int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_ bool dcn21_s0i3_golden_init_wa(struct dc *dc) { - struct dce_hwseq *hws = dc->hwseq; - uint32_t value = 0; + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->is_s0i3_golden_init_wa_done) + return !dc->res_pool->dccg->funcs->is_s0i3_golden_init_wa_done(dc->res_pool->dccg); - value = REG_READ(MICROSECOND_TIME_BASE_DIV); - - return value != 0x00120464; + return false; } void dcn21_exit_optimized_pwr_state( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 9b95f5883925..d04cfd403b7e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -53,6 +53,7 @@ #include "dcn30/dcn30_resource.h" #include "link_service.h" #include "dc_state_priv.h" +#include "dio/dcn10/dcn10_dio.h" #define 
TO_DCN_DCCG(dccg)\ container_of(dccg, struct dcn_dccg, base) @@ -795,13 +796,13 @@ void dcn30_init_hw(struct dc *dc) } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ - REG_WRITE(DIO_MEM_PWR_CTRL, 0); + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 2adbcc105aa6..db2f7cbb12ff 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -53,6 +53,7 @@ #include "dcn30/dcn30_vpg.h" #include "dce/dce_i2c_hw.h" #include "dce/dmub_abm_lcd.h" +#include "dio/dcn10/dcn10_dio.h" #define DC_LOGGER_INIT(logger) @@ -237,21 +238,17 @@ void dcn31_init_hw(struct dc *dc) abms[i]->funcs->abm_init(abms[i], backlight, user_level); } - /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - // Set i2c to light sleep until engine is setup - if (dc->debug.enable_mem_low_power.bits.i2c) - REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); + /* Power on DIO memory (AFMT HDMI) and set I2C to light sleep */ + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, dc->debug.enable_mem_low_power.bits.i2c); if (hws->funcs.setup_hpo_hw_control) hws->funcs.setup_hpo_hw_control(hws, false); if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ - 
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 518794fad9e1..a0aaa727e9fa 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -52,6 +52,7 @@ #include "link_service.h" #include "../dcn20/dcn20_hwseq.h" #include "dc_state_priv.h" +#include "dio/dcn10/dcn10_dio.h" #define DC_LOGGER_INIT(logger) @@ -485,7 +486,7 @@ bool dcn32_set_mcm_luts( struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - bool result = true; + bool rval, result; const struct pwl_params *lut_params = NULL; // 1D LUT @@ -508,10 +509,10 @@ bool dcn32_set_mcm_luts( lut_params = &plane_state->in_shaper_func.pwl; else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { // TODO: dpp_base replace - cm3_helper_translate_curve_to_hw_format(plane_state->ctx, + rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, &plane_state->in_shaper_func, &dpp_base->shaper_params, true); - lut_params = &dpp_base->shaper_params; + lut_params = rval ? 
&dpp_base->shaper_params : NULL; } mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); @@ -957,13 +958,13 @@ void dcn32_init_hw(struct dc *dc) } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ - REG_WRITE(DIO_MEM_PWR_CTRL, 0); + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index f7e16fee7594..b5a4cefbd35f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -53,6 +53,7 @@ #include "dcn30/dcn30_vpg.h" #include "dce/dce_i2c_hw.h" #include "dsc.h" +#include "dio/dcn10/dcn10_dio.h" #include "dcn20/dcn20_optc.h" #include "dcn30/dcn30_cm_common.h" #include "dcn31/dcn31_hwseq.h" @@ -272,12 +273,9 @@ void dcn35_init_hw(struct dc *dc) } } - /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - // Set i2c to light sleep until engine is setup - if (dc->debug.enable_mem_low_power.bits.i2c) - REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0); + /* Power on DIO memory (AFMT HDMI) and optionally disable I2C light sleep */ + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, !dc->debug.enable_mem_low_power.bits.i2c); if (hws->funcs.setup_hpo_hw_control) hws->funcs.setup_hpo_hw_control(hws, false); @@ -288,7 +286,8 @@ void dcn35_init_hw(struct dc *dc) } if 
(dc->debug.disable_mem_low_power) { - REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1); + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->enable_memory_low_power) + dc->res_pool->dccg->funcs->enable_memory_low_power(dc->res_pool->dccg, false); } if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 39b6f6d2d7c1..b91517b9fedc 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -39,6 +39,7 @@ #include "dc_state_priv.h" #include "link_enc_cfg.h" #include "../hw_sequencer.h" +#include "dio/dcn10/dcn10_dio.h" #define DC_LOGGER_INIT(logger) @@ -320,13 +321,13 @@ void dcn401_init_hw(struct dc *dc) } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ - REG_WRITE(DIO_MEM_PWR_CTRL, 0); + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 51b0f0fd8fcd..4632a5761b16 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -1184,6 +1184,8 @@ struct hw_sequencer_funcs { const struct link_resource *link_res, enum clock_source_id clock_source, 
uint32_t pixel_clock); + void (*enable_analog_link_output)(struct dc_link *link, + uint32_t pixel_clock); void (*disable_link_output)(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 61d8ef759aca..43579b0e1482 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -35,6 +35,7 @@ #include "hubp.h" #include "mpc.h" #include "dwb.h" +#include "hw/dio.h" #include "mcif_wb.h" #include "panel_cntl.h" #include "dmub/inc/dmub_cmd.h" @@ -250,6 +251,7 @@ struct resource_pool { struct timing_generator *timing_generators[MAX_PIPES]; struct stream_encoder *stream_enc[MAX_PIPES * 2]; struct hubbub *hubbub; + struct dio *dio; struct mpc *mpc; struct pp_smu_funcs *pp_smu; struct dce_aux *engines[MAX_PIPES]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 1e6ffd86a4c0..d6f5e01a0b66 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -224,6 +224,10 @@ struct dccg_funcs { void (*otg_drop_pixel)(struct dccg *dccg, uint32_t otg_inst); void (*dccg_init)(struct dccg *dccg); + void (*refclk_setup)(struct dccg *dccg); /* Deprecated - for backward compatibility only */ + void (*allow_clock_gating)(struct dccg *dccg, bool allow); + void (*enable_memory_low_power)(struct dccg *dccg, bool enable); + bool (*is_s0i3_golden_init_wa_done)(struct dccg *dccg); void (*set_dpstreamclk_root_clock_gating)( struct dccg *dccg, int dp_hpo_inst, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dio.h new file mode 100644 index 000000000000..532bf54cf2c4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dio.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. 
+ +#ifndef __DC_DIO_H__ +#define __DC_DIO_H__ + +#include "dc_types.h" + +struct dc_context; +struct dio; + +struct dio_funcs { + void (*mem_pwr_ctrl)(struct dio *dio, bool enable_i2c_light_sleep); +}; + +struct dio { + const struct dio_funcs *funcs; + struct dc_context *ctx; +}; + +#endif /* __DC_DIO_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index d795fc43dc9d..5abbf485d273 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -132,6 +132,8 @@ struct link_encoder_funcs { void (*enable_lvds_output)(struct link_encoder *enc, enum clock_source_id clock_source, uint32_t pixel_clock); + void (*enable_analog_output)(struct link_encoder *enc, + uint32_t pixel_clock); void (*disable_output)(struct link_encoder *link_enc, enum signal_type signal); void (*dp_set_lane_settings)(struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index da7bf59c4b9d..671ab1fc7320 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -122,6 +122,12 @@ enum timing_synchronization_type { VBLANK_SYNCHRONIZABLE }; +enum crc_poly_mode { + CRC_POLY_MODE_16, + CRC_POLY_MODE_32, + CRC_POLY_MODE_MAX, +}; + struct crc_params { /* Regions used to calculate CRC*/ uint16_t windowa_x_start; @@ -144,6 +150,7 @@ struct crc_params { uint8_t crc_eng_inst; bool reset; + enum crc_poly_mode crc_poly_mode; }; struct dcn_otg_state { diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile index 84dace27daf7..0f3670e30232 100644 --- a/drivers/gpu/drm/amd/display/dc/link/Makefile +++ b/drivers/gpu/drm/amd/display/dc/link/Makefile @@ -43,7 +43,8 @@ AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES) # hwss 
############################################################################### LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o \ -link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o +link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o \ +link_hwss_virtual.o AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \ $(LINK_HWSS)) diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.c index 4f7f99156897..64742c24f7e6 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.c @@ -23,7 +23,7 @@ * */ -#include "virtual_link_hwss.h" +#include "link_hwss_virtual.h" void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx) { diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.h index fbcbc5afb47d..f6e448c89751 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.h @@ -22,8 +22,8 @@ * Authors: AMD * */ -#ifndef __DC_VIRTUAL_LINK_HWSS_H__ -#define __DC_VIRTUAL_LINK_HWSS_H__ +#ifndef __DC_LINK_HWSS_VIRTUAL_H__ +#define __DC_LINK_HWSS_VIRTUAL_H__ #include "core_types.h" @@ -32,4 +32,4 @@ void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx); void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx); const struct link_hwss *get_virtual_link_hwss(void); -#endif /* __DC_VIRTUAL_LINK_HWSS_H__ */ +#endif /* __DC_LINK_HWSS_VIRTUAL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 578509e8d0e2..b2c020071cbf 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -1333,7 +1333,7 @@ static bool detect_link_and_local_sink(struct 
dc_link *link, // Pickup base DM settings dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink); // Override dc_panel_config if system has specific settings - dm_helpers_override_panel_settings(dc_ctx, &link->panel_config); + dm_helpers_override_panel_settings(dc_ctx, link); //sink only can use supported link rate table, we are foreced to enable it if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 91742bde4dc2..9b1d34c3438b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2155,6 +2155,18 @@ static enum dc_status enable_link_dp_mst( return enable_link_dp(state, pipe_ctx); } +static enum dc_status enable_link_analog( + struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + struct dc_link *link = pipe_ctx->stream->link; + + link->dc->hwss.enable_analog_link_output( + link, pipe_ctx->stream->timing.pix_clk_100hz); + + return DC_OK; +} + static enum dc_status enable_link_virtual(struct pipe_ctx *pipe_ctx) { struct dc_link *link = pipe_ctx->stream->link; @@ -2210,7 +2222,7 @@ static enum dc_status enable_link( status = DC_OK; break; case SIGNAL_TYPE_RGB: - status = DC_OK; + status = enable_link_analog(state, pipe_ctx); break; case SIGNAL_TYPE_VIRTUAL: status = enable_link_virtual(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 5fbcf04c6251..49db8123f08c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -561,12 +561,13 @@ static bool construct_phy(struct dc_link *link, enc_init_data.connector = link->link_id; enc_init_data.channel = get_ddc_line(link); enc_init_data.transmitter = transmitter_from_encoder; - enc_init_data.analog_engine = find_analog_engine(link, &enc_init_data.analog_encoder); 
enc_init_data.encoder = link_encoder; enc_init_data.analog_engine = link_analog_engine; - enc_init_data.hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); - + if (link->ctx->dce_version <= DCN_VERSION_4_01) + enc_init_data.hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + else + enc_init_data.hpd_gpio = NULL; if (enc_init_data.hpd_gpio) { dal_gpio_open(enc_init_data.hpd_gpio, GPIO_MODE_INTERRUPT); dal_gpio_unlock_pin(enc_init_data.hpd_gpio); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c index bbd6f93f5c98..7e45d1e767bb 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c @@ -34,6 +34,47 @@ link->ctx->logger #define DP_SINK_PR_ENABLE_AND_CONFIGURATION 0x37B +#define DP_SINK_ENABLE_FRAME_SKIPPING_MODE_SHIFT (5) + +static unsigned int dp_pr_calc_num_static_frames(unsigned int vsync_rate_hz) +{ + // at least 2 frames for static screen + unsigned int num_frames = 2; + + // get number of frames for at least 50ms + if (vsync_rate_hz > 40) + num_frames = (vsync_rate_hz + 10) / 20; + + return num_frames; +} + +static void dp_pr_set_static_screen_param(struct dc_link *link) +{ + struct dc_static_screen_params params = {0}; + struct dc *dc = link->ctx->dc; + // only support DP sst for now + if (!dc_is_dp_sst_signal(link->connector_signal)) + return; + + for (int i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream && + dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) { + struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; + unsigned int vsync_rate_hz = div64_u64(div64_u64( + (stream->timing.pix_clk_100hz * (u64)100), + stream->timing.v_total), + stream->timing.h_total); + + params.triggers.cursor_update = true; + 
+ params.triggers.overlay_update = true; + params.triggers.surface_update = true; + params.num_frames = dp_pr_calc_num_static_frames(vsync_rate_hz); + + dc_stream_set_static_screen_params(dc, &stream, 1, &params); + break; + } + } +} static bool dp_setup_panel_replay(struct dc_link *link, const struct dc_stream_state *stream) { @@ -49,6 +90,7 @@ static bool dp_setup_panel_replay(struct dc_link *link, const struct dc_stream_s union panel_replay_enable_and_configuration_2 pr_config_2 = { 0 }; union dpcd_alpm_configuration alpm_config; + uint8_t data = 0; replay_context.controllerId = CONTROLLER_ID_UNDEFINED; @@ -146,6 +188,14 @@ static bool dp_setup_panel_replay(struct dc_link *link, const struct dc_stream_s DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw, sizeof(alpm_config.raw)); + + //Enable frame skipping + if (link->replay_settings.config.frame_skip_supported) + data = data | (1 << DP_SINK_ENABLE_FRAME_SKIPPING_MODE_SHIFT); + + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_SINK_PR_ENABLE_AND_CONFIGURATION, + (uint8_t *)&(data), sizeof(uint8_t)); } return true; @@ -159,6 +209,9 @@ bool dp_pr_get_panel_inst(const struct dc *dc, if (!dc || !link || !inst_out) return false; + if (dc->config.frame_update_cmd_version2 == false) + return dc_get_edp_link_panel_inst(dc, link, inst_out); + if (!dc_is_dp_sst_signal(link->connector_signal)) /* only supoprt DP sst (eDP included) for now */ return false; @@ -199,6 +252,9 @@ bool dp_pr_enable(struct dc_link *link, bool enable) if (!dp_pr_get_panel_inst(dc, link, &panel_inst)) return false; + if (enable && !dc_is_embedded_signal(link->connector_signal)) + dp_pr_set_static_screen_param(link); + if (link->replay_settings.replay_allow_active != enable) { //for sending PR enable commands to DMUB memset(&cmd, 0, sizeof(cmd)); @@ -276,6 +332,12 @@ bool dp_pr_copy_settings(struct dc_link *link, struct replay_context *replay_con pipe_ctx->stream->timing.v_border_top + pipe_ctx->stream->timing.v_border_bottom) / 
pipe_ctx->stream->timing.dsc_cfg.num_slices_v; + if (dc_is_embedded_signal(link->connector_signal)) + cmd.pr_copy_settings.data.main_link_activity_option = OPTION_1C; + else + // For external DP, use option 1-A + cmd.pr_copy_settings.data.main_link_activity_option = OPTION_1A; + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c index 29f3a03687b2..b157d05b67ad 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c @@ -136,8 +136,13 @@ enum hpd_source_id get_hpd_line(struct dc_link *link) hpd_id = HPD_SOURCEID_UNKNOWN; - hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); + /* Use GPIO path where supported, otherwise use hardware encoder path */ + if (link->ctx && link->ctx->dce_version <= DCN_VERSION_4_01) { + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + } else { + hpd = NULL; + } if (hpd) { switch (dal_irq_get_source(hpd)) { diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c index 83bbbf34bcac..badcef027b84 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c @@ -724,8 +724,7 @@ bool mpc32_program_shaper( return false; } - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) - mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true); + mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true); current_mode = mpc32_get_shaper_current(mpc, mpcc_id); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index 803bcc25601c..0b3f974f452e 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ 
b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -244,7 +244,13 @@ uint32_t OTG_TRIGB_MANUAL_TRIG; \ uint32_t OTG_UPDATE_LOCK; \ uint32_t OTG_V_TOTAL_INT_STATUS; \ - uint32_t OTG_VSYNC_NOM_INT_STATUS + uint32_t OTG_VSYNC_NOM_INT_STATUS; \ + uint32_t OTG_CRC0_DATA_R32; \ + uint32_t OTG_CRC0_DATA_G32; \ + uint32_t OTG_CRC0_DATA_B32; \ + uint32_t OTG_CRC1_DATA_R32; \ + uint32_t OTG_CRC1_DATA_G32; \ + uint32_t OTG_CRC1_DATA_B32 struct dcn_optc_registers { @@ -657,6 +663,15 @@ struct dcn_optc_registers { type OTG_V_COUNT_STOP;\ type OTG_V_COUNT_STOP_TIMER; +#define TG_REG_FIELD_LIST_DCN3_6(type) \ + type OTG_CRC_POLY_SEL; \ + type CRC0_R_CR32; \ + type CRC0_G_Y32; \ + type CRC0_B_CB32; \ + type CRC1_R_CR32; \ + type CRC1_G_Y32; \ + type CRC1_B_CB32; + #define TG_REG_FIELD_LIST_DCN401(type) \ type OPTC_SEGMENT_WIDTH_LAST;\ type OTG_PSTATE_KEEPOUT_START;\ @@ -670,6 +685,7 @@ struct dcn_optc_shift { TG_REG_FIELD_LIST_DCN2_0(uint8_t) TG_REG_FIELD_LIST_DCN3_2(uint8_t) TG_REG_FIELD_LIST_DCN3_5(uint8_t) + TG_REG_FIELD_LIST_DCN3_6(uint8_t) TG_REG_FIELD_LIST_DCN401(uint8_t) }; @@ -678,6 +694,7 @@ struct dcn_optc_mask { TG_REG_FIELD_LIST_DCN2_0(uint32_t) TG_REG_FIELD_LIST_DCN3_2(uint32_t) TG_REG_FIELD_LIST_DCN3_5(uint32_t) + TG_REG_FIELD_LIST_DCN3_6(uint32_t) TG_REG_FIELD_LIST_DCN401(uint32_t) }; diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index f699e95059f3..5aafd0eedf66 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -180,7 +180,97 @@ static void optc35_phantom_crtc_post_enable(struct timing_generator *optc) REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); } -static bool optc35_configure_crc(struct timing_generator *optc, +/** + * optc35_get_crc - Capture CRC result per component + * + * @optc: timing_generator instance. 
+ * @idx: index of crc engine to get CRC from + * @r_cr: primary CRC signature for red data. + * @g_y: primary CRC signature for green data. + * @b_cb: primary CRC signature for blue data. + * + * This function reads the CRC signature from the OPTC registers. Notice that + * we have three registers to keep the CRC result per color component (RGB). + * + * For different DCN versions: + * - If CRC32 registers (OTG_CRC0_DATA_R32/G32/B32) are available, read from + * 32-bit CRC registers. DCN 3.6+ supports both CRC-32 and CRC-16 polynomials + * selectable via OTG_CRC_POLY_SEL. + * - Otherwise, read from legacy 16-bit CRC registers (OTG_CRC0_DATA_RG/B) + * which only support CRC-16 polynomial. + * + * Returns: + * If CRC is disabled, return false; otherwise, return true, and the CRC + * results in the parameters. + */ +static bool optc35_get_crc(struct timing_generator *optc, uint8_t idx, + uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) +{ + uint32_t field = 0; + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field); + + /* Early return if CRC is not enabled for this CRTC */ + if (!field) + return false; + + if (optc1->tg_mask->CRC0_R_CR32 != 0 && optc1->tg_mask->CRC1_R_CR32 != 0 && + optc1->tg_mask->CRC0_G_Y32 != 0 && optc1->tg_mask->CRC1_G_Y32 != 0 && + optc1->tg_mask->CRC0_B_CB32 != 0 && optc1->tg_mask->CRC1_B_CB32 != 0) { + switch (idx) { + case 0: + /* OTG_CRC0_DATA_R32/G32/B32 has the CRC32 results */ + REG_GET(OTG_CRC0_DATA_R32, + CRC0_R_CR32, r_cr); + REG_GET(OTG_CRC0_DATA_G32, + CRC0_G_Y32, g_y); + REG_GET(OTG_CRC0_DATA_B32, + CRC0_B_CB32, b_cb); + break; + case 1: + /* OTG_CRC1_DATA_R32/G32/B32 has the CRC32 results */ + REG_GET(OTG_CRC1_DATA_R32, + CRC1_R_CR32, r_cr); + REG_GET(OTG_CRC1_DATA_G32, + CRC1_G_Y32, g_y); + REG_GET(OTG_CRC1_DATA_B32, + CRC1_B_CB32, b_cb); + break; + default: + return false; + } + } else { + switch (idx) { + case 0: + /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */ + 
REG_GET_2(OTG_CRC0_DATA_RG, + CRC0_R_CR, r_cr, + CRC0_G_Y, g_y); + + /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */ + REG_GET(OTG_CRC0_DATA_B, + CRC0_B_CB, b_cb); + break; + case 1: + /* OTG_CRC1_DATA_RG has the CRC16 results for the red and green component */ + REG_GET_2(OTG_CRC1_DATA_RG, + CRC1_R_CR, r_cr, + CRC1_G_Y, g_y); + + /* OTG_CRC1_DATA_B has the CRC16 results for the blue component */ + REG_GET(OTG_CRC1_DATA_B, + CRC1_B_CB, b_cb); + break; + default: + return false; + } + } + + return true; +} + +bool optc35_configure_crc(struct timing_generator *optc, const struct crc_params *params) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -266,6 +356,10 @@ static bool optc35_configure_crc(struct timing_generator *optc, default: return false; } + if (optc1->tg_mask->OTG_CRC_POLY_SEL != 0) { + REG_UPDATE(OTG_CRC_CNTL, + OTG_CRC_POLY_SEL, params->crc_poly_mode); + } return true; } @@ -343,7 +437,7 @@ void optc35_set_drr( REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); } -static void optc35_set_long_vtotal( +void optc35_set_long_vtotal( struct timing_generator *optc, const struct long_vtotal_params *params) { @@ -430,7 +524,7 @@ static void optc35_set_long_vtotal( } } -static void optc35_wait_otg_disable(struct timing_generator *optc) +void optc35_wait_otg_disable(struct timing_generator *optc) { struct optc *optc1; uint32_t is_master_en; @@ -488,7 +582,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = { .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, .clear_optc_underflow = optc1_clear_optc_underflow, .setup_global_swap_lock = NULL, - .get_crc = optc1_get_crc, + .get_crc = optc35_get_crc, .configure_crc = optc35_configure_crc, .set_dsc_config = optc3_set_dsc_config, .get_dsc_status = optc2_get_dsc_status, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h index 733a2f149d9a..82e0818c8f9f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h @@ -74,10 +74,29 @@ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh) +#define OPTC_COMMON_MASK_SH_LIST_DCN3_6(mask_sh)\ + OPTC_COMMON_MASK_SH_LIST_DCN3_5(mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_POLY_SEL, mask_sh),\ + SF(OTG_CRC320_OTG_CRC0_DATA_R32, CRC0_R_CR32, mask_sh),\ + SF(OTG_CRC320_OTG_CRC0_DATA_G32, CRC0_G_Y32, mask_sh),\ + SF(OTG_CRC320_OTG_CRC0_DATA_B32, CRC0_B_CB32, mask_sh),\ + SF(OTG_CRC320_OTG_CRC1_DATA_R32, CRC1_R_CR32, mask_sh),\ + SF(OTG_CRC320_OTG_CRC1_DATA_G32, CRC1_G_Y32, mask_sh),\ + SF(OTG_CRC320_OTG_CRC1_DATA_B32, CRC1_B_CB32, mask_sh) + void dcn35_timing_generator_init(struct optc *optc1); void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable); void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params); +void optc35_set_long_vtotal( + struct timing_generator *optc, + const struct long_vtotal_params *params); + +bool optc35_configure_crc(struct timing_generator *optc, + const struct crc_params *params); + +void optc35_wait_otg_disable(struct timing_generator *optc); + #endif /* __DC_OPTC_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index d40d91ec2035..05f7ff60f8f5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -31,7 +31,7 @@ #include "resource.h" #include "clk_mgr.h" #include "include/irq_service_interface.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" @@ -226,7 +226,7 @@ static const struct 
dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), link_regs(6), - { .DAC_ENABLE = mmDAC_ENABLE }, + {0} }; #define stream_enc_regs(id)\ @@ -242,7 +242,8 @@ static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), - stream_enc_regs(6) + stream_enc_regs(6), + {SR(DAC_SOURCE_SELECT),} /* DACA */ }; static const struct dce_stream_encoder_shift se_shift = { @@ -491,7 +492,8 @@ static struct stream_encoder *dce100_stream_encoder_create( return NULL; if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } @@ -638,7 +640,8 @@ static struct link_encoder *dce100_link_encoder_create( if (!enc110) return NULL; - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, @@ -978,7 +981,10 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( struct dc_link *link = stream->link; enum engine_id preferred_engine = link->link_enc->preferred_engine; - if (dc_is_rgb_signal(stream->signal)) + /* Prefer analog engine if the link encoder has one. + * Otherwise, it's an external encoder. 
+ */ + if (dc_is_rgb_signal(stream->signal) && link->link_enc->analog_engine != ENGINE_ID_UNKNOWN) preferred_engine = link->link_enc->analog_engine; for (i = 0; i < pool->stream_enc_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index b1570b6b1af3..92890784caa6 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -35,7 +35,7 @@ #include "dce112/dce112_resource.h" #include "dce110/dce110_resource.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce120/dce120_timing_generator.h" #include "irq/dce120/irq_service_dce120.h" #include "dce/dce_opp.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index f0152933bee2..8d810d5c8781 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -243,7 +243,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), {0}, - { .DAC_ENABLE = mmDAC_ENABLE }, + {0} }; #define stream_enc_regs(id)\ @@ -258,7 +258,9 @@ static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), - stream_enc_regs(5) + stream_enc_regs(5), + {0}, + {SR(DAC_SOURCE_SELECT),} /* DACA */ }; static const struct dce_stream_encoder_shift se_shift = { @@ -607,7 +609,8 @@ static struct stream_encoder *dce60_stream_encoder_create( return NULL; if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], &se_shift, &se_mask); return 
&enc110->base; } @@ -733,8 +736,9 @@ static struct link_encoder *dce60_link_encoder_create( if (!enc110) return NULL; - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { - dce110_link_encoder_construct(enc110, + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { + dce60_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[ENGINE_ID_DACA], diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 8687104cabb7..a68e799d5885 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -242,7 +242,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), link_regs(6), - { .DAC_ENABLE = mmDAC_ENABLE }, + {0} }; #define stream_enc_regs(id)\ @@ -258,7 +258,8 @@ static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), - stream_enc_regs(6) + stream_enc_regs(6), + {SR(DAC_SOURCE_SELECT),} /* DACA */ }; static const struct dce_stream_encoder_shift se_shift = { @@ -614,7 +615,8 @@ static struct stream_encoder *dce80_stream_encoder_create( return NULL; if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } @@ -740,7 +742,8 @@ static struct link_encoder *dce80_link_encoder_create( if (!enc110) return NULL; - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index f12367adf145..476780a5450f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -48,7 +48,7 @@ #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" #include "dcn10/dcn10_hubp.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 46985eb2a623..6731544f0981 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -55,7 +55,7 @@ #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn20/dcn20_dccg.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 055107843a70..90d38631f63a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -51,7 +51,7 @@ #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 
967e813a45e5..107612595db6 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -57,7 +57,7 @@ #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dml/display_mode_vba.h" #include "dcn20/dcn20_dccg.h" #include "dcn21/dcn21_dccg.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index d0ebb733e802..6cfdc37dab58 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -55,7 +55,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn30/dcn30_dccg.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 3ad6a3d4858e..e1d0c166b484 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -54,7 +54,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn301/dcn301_dccg.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index e853ea110310..8ad72557b16a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -64,7 +64,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dml/dcn31/dcn31_fpu.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 3ccde75a4ecb..5f0fe6e5bd82 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -66,7 +66,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dml/dcn31/dcn31_fpu.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 4e962f522f1b..3ae787a377b1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -63,7 +63,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dml/dcn31/dcn31_fpu.h" @@ -1230,12 +1230,12 @@ static struct stream_encoder *dcn315_stream_encoder_create( /*PHYB is wired off in HW, allow front end to remapping, otherwise needs more changes*/ /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) return NULL; + vpg_inst 
= eng_id; + afmt_inst = eng_id; + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn31_vpg_create(ctx, vpg_inst); afmt = dcn31_afmt_create(ctx, afmt_inst); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 5a95dd54cb42..4b8668458f03 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -63,7 +63,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dml/dcn31/dcn31_fpu.h" @@ -1223,12 +1223,12 @@ static struct stream_encoder *dcn316_stream_encoder_create( int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) return NULL; + vpg_inst = eng_id; + afmt_inst = eng_id; + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn31_vpg_create(ctx, vpg_inst); afmt = dcn31_afmt_create(ctx, afmt_inst); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index b276fec3e479..a55078458ba5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -65,7 +65,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" @@ -1211,12 +1211,12 @@ static struct stream_encoder 
*dcn32_stream_encoder_create( int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) return NULL; + vpg_inst = eng_id; + afmt_inst = eng_id; + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn32_vpg_create(ctx, vpg_inst); afmt = dcn32_afmt_create(ctx, afmt_inst); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 3466ca34c93f..188c3f24f110 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -68,7 +68,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" @@ -1192,12 +1192,12 @@ static struct stream_encoder *dcn321_stream_encoder_create( int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) return NULL; + vpg_inst = eng_id; + afmt_inst = eng_id; + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn321_vpg_create(ctx, vpg_inst); afmt = dcn321_afmt_create(ctx, afmt_inst); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 45454a097264..5ea805fcff48 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -70,7 +70,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include 
"clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn35/dcn35_dccg.h" @@ -1274,12 +1274,12 @@ static struct stream_encoder *dcn35_stream_encoder_create( int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) return NULL; + vpg_inst = eng_id; + afmt_inst = eng_id; + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn31_vpg_create(ctx, vpg_inst); afmt = dcn31_afmt_create(ctx, afmt_inst); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index e3c587165807..424b52e2dd7b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -49,7 +49,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn35/dcn35_dccg.h" @@ -1254,12 +1254,12 @@ static struct stream_encoder *dcn35_stream_encoder_create( int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) return NULL; + vpg_inst = eng_id; + afmt_inst = eng_id; + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn31_vpg_create(ctx, vpg_inst); afmt = dcn31_afmt_create(ctx, afmt_inst); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c 
index 6469d5fe2e6d..7582217bd06d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -49,7 +49,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn35/dcn35_dccg.h" @@ -460,16 +460,22 @@ static const struct dcn30_mpc_mask mpc_mask = { }; #define optc_regs_init(id)\ - OPTC_COMMON_REG_LIST_DCN3_5_RI(id) + OPTC_COMMON_REG_LIST_DCN3_5_RI(id),\ + SRI_ARR(OTG_CRC0_DATA_R32, OTG_CRC32, id),\ + SRI_ARR(OTG_CRC0_DATA_G32, OTG_CRC32, id),\ + SRI_ARR(OTG_CRC0_DATA_B32, OTG_CRC32, id),\ + SRI_ARR(OTG_CRC1_DATA_R32, OTG_CRC32, id),\ + SRI_ARR(OTG_CRC1_DATA_G32, OTG_CRC32, id),\ + SRI_ARR(OTG_CRC1_DATA_B32, OTG_CRC32, id) static struct dcn_optc_registers optc_regs[4]; static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) + OPTC_COMMON_MASK_SH_LIST_DCN3_6(__SHIFT) }; static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK) + OPTC_COMMON_MASK_SH_LIST_DCN3_6(_MASK) }; #define hubp_regs_init(id)\ @@ -769,7 +775,7 @@ static const struct dc_debug_options debug_defaults_drv = { }; static const struct dc_check_config config_defaults = { - .enable_legacy_fast_update = true, + .enable_legacy_fast_update = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 1cdbb65da4a3..f5e02a1ff771 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -21,6 +21,7 @@ #include "dcn401/dcn401_hubbub.h" #include "dcn401/dcn401_mpc.h" #include "dcn401/dcn401_hubp.h" +#include 
"dio/dcn10/dcn10_dio.h" #include "irq/dcn401/irq_service_dcn401.h" #include "dcn401/dcn401_dpp.h" #include "dcn401/dcn401_optc.h" @@ -46,7 +47,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" +#include "dio/virtual/virtual_stream_encoder.h" #include "dml/display_mode_vba.h" #include "dcn401/dcn401_dccg.h" #include "dcn10/dcn10_resource.h" @@ -634,6 +635,22 @@ static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; +#define dio_regs_init() \ + DIO_REG_LIST_DCN10() + +static struct dcn_dio_registers dio_regs; + +#define DIO_MASK_SH_LIST_DCN401(mask_sh)\ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) + +static const struct dcn_dio_shift dio_shift = { + DIO_MASK_SH_LIST_DCN401(__SHIFT) +}; + +static const struct dcn_dio_mask dio_mask = { + DIO_MASK_SH_LIST_DCN401(_MASK) +}; + static const struct resource_caps res_cap_dcn4_01 = { .num_timing_generator = 4, .num_opp = 4, @@ -881,6 +898,22 @@ static struct hubbub *dcn401_hubbub_create(struct dc_context *ctx) return &hubbub2->base; } +static struct dio *dcn401_dio_create(struct dc_context *ctx) +{ + struct dcn10_dio *dio10 = kzalloc(sizeof(struct dcn10_dio), GFP_KERNEL); + + if (!dio10) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT dio_regs + dio_regs_init(); + + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); + + return &dio10->base; +} + static struct hubp *dcn401_hubp_create( struct dc_context *ctx, uint32_t inst) @@ -1499,6 +1532,11 @@ static void dcn401_resource_destruct(struct dcn401_resource_pool *pool) if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); + if (pool->base.dio != NULL) { + kfree(TO_DCN10_DIO(pool->base.dio)); + pool->base.dio = NULL; + } + if (pool->base.oem_device != NULL) { struct dc *dc = pool->base.oem_device->ctx->dc; @@ -2071,6 +2109,14 @@ static bool dcn401_resource_construct( goto create_fail; } + /* DIO */ + pool->base.dio = 
dcn401_dio_create(ctx); + if (pool->base.dio == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dio!\n"); + goto create_fail; + } + /* HUBPs, DPPs, OPPs, TGs, ABMs */ for (i = 0, j = 0; i < pool->base.res_cap->num_timing_generator; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c index 1d9edb89e47a..a75ab23b0726 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c @@ -293,7 +293,7 @@ static const uint16_t filter_isharp_bs_3tap_64p_s1_12[99] = { }; /* Pre-generated 1DLUT for given setup and sharpness level */ -struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = { +static struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = { { 0, 0, { @@ -332,7 +332,7 @@ struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = }, }; -struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = { +static struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = { {1125, 1000, 0}, {11, 10, 1}, {1075, 1000, 2}, diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile deleted file mode 100644 index 931facd4dab5..000000000000 --- a/drivers/gpu/drm/amd/display/dc/virtual/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright 2017 Advanced Micro Devices, Inc. 
-# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# -# Makefile for the virtual sub-component of DAL. -# It provides the control and status of HW CRTC block. 
- -VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o virtual_link_hwss.o - -AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL)) - -AMD_DISPLAY_FILES += $(AMD_DAL_VIRTUAL) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 18e0bdfd6ff4..6f388c910e18 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -736,6 +736,16 @@ union pr_hw_flags { uint32_t u32All; }; +/** + * Definition of Panel Replay ML Activity Options + */ +enum pr_ml_activity_option { + OPTION_DEFAULT = 0x00, // VESA Option Default (1C) + OPTION_1A = 0x01, // VESA Option 1A + OPTION_1B = 0x02, // VESA Option 1B + OPTION_1C = 0x03, // VESA Option 1C +}; + union fw_assisted_mclk_switch_version { struct { uint8_t minor : 5; @@ -1628,6 +1638,11 @@ enum dmub_gpint_command { * DESC: Initiates IPS wake sequence. */ DMUB_GPINT__IPS_DEBUG_WAKE = 137, + /** + * DESC: Do panel power off sequence + * ARGS: 1 - Power off + */ + DMUB_GPINT__PANEL_POWER_OFF_SEQ = 138, }; /** @@ -4398,6 +4413,7 @@ enum dmub_cmd_panel_replay_type { enum dmub_cmd_panel_replay_state_update_subtype { PR_STATE_UPDATE_COASTING_VTOTAL = 0x1, PR_STATE_UPDATE_SYNC_MODE = 0x2, + PR_STATE_UPDATE_RUNTIME_FLAGS = 0x3, }; enum dmub_cmd_panel_replay_general_subtype { @@ -6691,6 +6707,13 @@ struct dmub_rb_cmd_pr_copy_settings { struct dmub_cmd_pr_copy_settings_data data; }; +union dmub_pr_runtime_flags { + struct { + uint32_t disable_abm_optimization : 1; // Disable ABM optimization for PR + } bitfields; + uint32_t u32All; +}; + struct dmub_cmd_pr_update_state_data { /** * Panel Instance. 
@@ -6709,6 +6732,8 @@ struct dmub_cmd_pr_update_state_data { */ uint32_t coasting_vtotal; uint32_t sync_mode; + + union dmub_pr_runtime_flags pr_runtime_flags; }; struct dmub_cmd_pr_general_cmd_data { diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9fd78fcff15c..6683ffd6aa68 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -1829,4 +1829,9 @@ struct amdgpu_partition_metrics_v1_1 { struct gpu_metrics_attr metrics_attrs[]; }; +enum amdgpu_xgmi_link_status { + AMDGPU_XGMI_LINK_INACTIVE = 0, + AMDGPU_XGMI_LINK_ACTIVE = 1, +}; + #endif diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 07641c9413d2..938361ecae05 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -243,11 +243,11 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, enum amd_pm_state_type state; int ret; - if (strncmp("battery", buf, strlen("battery")) == 0) + if (sysfs_streq(buf, "battery")) state = POWER_STATE_TYPE_BATTERY; - else if (strncmp("balanced", buf, strlen("balanced")) == 0) + else if (sysfs_streq(buf, "balanced")) state = POWER_STATE_TYPE_BALANCED; - else if (strncmp("performance", buf, strlen("performance")) == 0) + else if (sysfs_streq(buf, "performance")) state = POWER_STATE_TYPE_PERFORMANCE; else return -EINVAL; @@ -363,29 +363,28 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, enum amd_dpm_forced_level level; int ret = 0; - if (strncmp("low", buf, strlen("low")) == 0) { + if (sysfs_streq(buf, "low")) level = AMD_DPM_FORCED_LEVEL_LOW; - } else if (strncmp("high", buf, strlen("high")) == 0) { + else if (sysfs_streq(buf, "high")) level = AMD_DPM_FORCED_LEVEL_HIGH; - } else if (strncmp("auto", buf, strlen("auto")) == 0) { + else if (sysfs_streq(buf, "auto")) level = AMD_DPM_FORCED_LEVEL_AUTO; - } else if (strncmp("manual", buf, 
strlen("manual")) == 0) { + else if (sysfs_streq(buf, "manual")) level = AMD_DPM_FORCED_LEVEL_MANUAL; - } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { + else if (sysfs_streq(buf, "profile_exit")) level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; - } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { + else if (sysfs_streq(buf, "profile_standard")) level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; - } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { + else if (sysfs_streq(buf, "profile_min_sclk")) level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; - } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { + else if (sysfs_streq(buf, "profile_min_mclk")) level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; - } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { + else if (sysfs_streq(buf, "profile_peak")) level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) { + else if (sysfs_streq(buf, "perf_determinism")) level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM; - } else { + else return -EINVAL; - } ret = amdgpu_pm_get_access(adev); if (ret < 0) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 0f8f69481f5b..d8959e0f109c 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3464,6 +3464,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, max_sclk = 60000; max_mclk = 80000; } + if ((adev->pdev->device == 0x666f) && + (adev->pdev->revision == 0x00)) { + max_sclk = 80000; + max_mclk = 95000; + } } else if (adev->asic_type == CHIP_OLAND) { if ((adev->pdev->revision == 0xC7) || (adev->pdev->revision == 0x80) || diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 75897ac203c3..18ece5c714c5 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -810,7 +810,7 @@ static int smu_early_init(struct amdgpu_ip_block *ip_block) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; smu->is_apu = false; - smu->smu_baco.state = SMU_BACO_STATE_NONE; + smu->smu_baco.state = SMU_BACO_STATE_EXIT; smu->smu_baco.platform_support = false; smu->smu_baco.maco_support = false; smu->user_dpm_profile.fan_mode = -1; @@ -1355,7 +1355,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) int i, ret; smu->pool_size = adev->pm.smu_prv_buffer_size; - smu_feature_init(smu, SMU_FEATURE_MAX); + smu_feature_init(smu, SMU_FEATURE_NUM_DEFAULT); INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); @@ -1646,7 +1646,7 @@ static int smu_smc_hw_setup(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint8_t pcie_gen = 0, pcie_width = 0; - uint64_t features_supported; + struct smu_feature_bits features_supported; int ret = 0; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { @@ -1807,7 +1807,7 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } smu_feature_list_set_bits(smu, SMU_FEATURE_LIST_SUPPORTED, - (unsigned long *)&features_supported); + features_supported.bits); if (!smu_is_dpm_running(smu)) dev_info(adev->dev, "dpm has been disabled\n"); @@ -2120,9 +2120,8 @@ static int smu_reset_mp1_state(struct smu_context *smu) int ret = 0; if ((!adev->in_runpm) && (!adev->in_suspend) && - (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) == - IP_VERSION(13, 0, 10) && - !amdgpu_device_has_display_hardware(adev)) + (!amdgpu_in_reset(adev)) && !smu->is_apu && + amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(13, 0, 0)) ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); return ret; @@ -3152,10 +3151,19 @@ static int smu_read_sensor(void *handle, *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100; *size = 4; break; - case 
AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: - ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); + case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: { + struct smu_feature_bits feature_mask; + uint32_t features[2]; + + /* TBD: need to handle for > 64 bits */ + ret = smu_feature_get_enabled_mask(smu, &feature_mask); + if (!ret) { + smu_feature_bits_to_arr32(&feature_mask, features, 64); + *(uint64_t *)data = *(uint64_t *)features; + } *size = 8; break; + } case AMDGPU_PP_SENSOR_UVD_POWER: *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; *size = 4; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 7c63c631f6d4..a6303d093c50 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -471,12 +471,30 @@ struct smu_power_context { struct smu_power_gate power_gate; }; -#define SMU_FEATURE_MAX (64) +#define SMU_FEATURE_NUM_DEFAULT (64) +#define SMU_FEATURE_MAX (128) struct smu_feature_bits { DECLARE_BITMAP(bits, SMU_FEATURE_MAX); }; +/* + * Helpers for initializing smu_feature_bits statically. + * Use SMU_FEATURE_BIT_INIT() which automatically handles array indexing: + * static const struct smu_feature_bits example = { + * .bits = { + * SMU_FEATURE_BIT_INIT(5), + * SMU_FEATURE_BIT_INIT(10), + * SMU_FEATURE_BIT_INIT(65), + * SMU_FEATURE_BIT_INIT(100) + * } + * }; + */ +#define SMU_FEATURE_BITS_ELEM(bit) ((bit) / BITS_PER_LONG) +#define SMU_FEATURE_BITS_POS(bit) ((bit) % BITS_PER_LONG) +#define SMU_FEATURE_BIT_INIT(bit) \ + [SMU_FEATURE_BITS_ELEM(bit)] = (1UL << SMU_FEATURE_BITS_POS(bit)) + enum smu_feature_list { SMU_FEATURE_LIST_SUPPORTED, SMU_FEATURE_LIST_ALLOWED, @@ -518,7 +536,6 @@ enum smu_reset_mode { enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, SMU_BACO_STATE_EXIT, - SMU_BACO_STATE_NONE, }; struct smu_baco_context { @@ -1212,7 +1229,8 @@ struct pptable_funcs { * on the SMU. 
* &feature_mask: Enabled feature mask. */ - int (*get_enabled_mask)(struct smu_context *smu, uint64_t *feature_mask); + int (*get_enabled_mask)(struct smu_context *smu, + struct smu_feature_bits *feature_mask); /** * @feature_is_enabled: Test if a feature is enabled. @@ -2044,6 +2062,12 @@ static inline bool smu_feature_bits_empty(const struct smu_feature_bits *bits, return bitmap_empty(bits->bits, nbits); } +static inline bool smu_feature_bits_full(const struct smu_feature_bits *bits, + unsigned int nbits) +{ + return bitmap_full(bits->bits, nbits); +} + static inline void smu_feature_bits_copy(struct smu_feature_bits *dst, const unsigned long *src, unsigned int nbits) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h index 14e8d8c7a80a..ab4a64f54e79 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h @@ -226,8 +226,6 @@ int smu_v15_0_deep_sleep_control(struct smu_context *smu, int smu_v15_0_set_gfx_power_up_by_imu(struct smu_context *smu); -int smu_v15_0_set_default_dpm_tables(struct smu_context *smu); - int smu_v15_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index b22a0e91826d..0c4afd1e1aab 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -65,14 +65,15 @@ #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 #define SMU_FEATURES_HIGH_SHIFT 32 -#define SMC_DPM_FEATURE ( \ - FEATURE_DPM_PREFETCHER_MASK | \ - FEATURE_DPM_GFXCLK_MASK | \ - FEATURE_DPM_UCLK_MASK | \ - FEATURE_DPM_SOCCLK_MASK | \ - FEATURE_DPM_MP0CLK_MASK | \ - FEATURE_DPM_FCLK_MASK | \ - FEATURE_DPM_XGMI_MASK) +static const struct smu_feature_bits arcturus_dpm_features = { + .bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT), + 
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI_BIT) } +}; #define smnPCIE_ESM_CTRL 0x111003D0 @@ -1526,13 +1527,14 @@ static int arcturus_set_performance_level(struct smu_context *smu, static bool arcturus_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + arcturus_dpm_features.bits); } static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 4a5dcc893665..87953a4d0a43 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -60,11 +60,13 @@ static struct gfx_user_settings { static uint32_t cyan_skillfish_sclk_default; -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \ - FEATURE_MASK(FEATURE_GFX_DPM_BIT)) +static const struct smu_feature_bits cyan_skillfish_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SOC_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT) + } +}; static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), @@ -361,7 +363,7 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; /* we 
need to re-init after suspend so return false */ if (adev->in_suspend) @@ -378,7 +380,8 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cyan_skillfish_sclk_default); - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + cyan_skillfish_dpm_features.bits); } static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu, @@ -565,12 +568,13 @@ static int cyan_skillfish_get_dpm_ultimate_freq(struct smu_context *smu, return 0; } -static int cyan_skillfish_get_enabled_mask(struct smu_context *smu, - uint64_t *feature_mask) +static int +cyan_skillfish_get_enabled_mask(struct smu_context *smu, + struct smu_feature_bits *feature_mask) { if (!feature_mask) return -EINVAL; - memset(feature_mask, 0xff, sizeof(*feature_mask)); + smu_feature_bits_fill(feature_mask); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index f14eed052526..737bfdfb814c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -58,16 +58,18 @@ #undef pr_info #undef pr_debug -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \ - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT) | \ - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)) +static const struct smu_feature_bits navi10_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFX_PACE_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), + 
SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_DCEFCLK_BIT) + } +}; #define SMU_11_0_GFX_BUSY_THRESHOLD 15 @@ -1619,13 +1621,14 @@ static int navi10_display_config_changed(struct smu_context *smu) static bool navi10_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + navi10_dpm_features.bits); } static int navi10_get_fan_speed_rpm(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 98a02fc08214..6268bc5ed3e6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -60,16 +60,18 @@ #undef pr_info #undef pr_debug -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \ - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) +static const struct smu_feature_bits sienna_cichlid_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_DCEFCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT) + } +}; #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 @@ -1534,13 +1536,14 @@ static 
int sienna_cichlid_display_config_changed(struct smu_context *smu) static bool sienna_cichlid_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + sienna_cichlid_dpm_features.bits); } static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 1d0f9f8ddf9b..56efcfa327df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -751,7 +751,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu) uint32_t feature_mask[2]; if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) || - feature->feature_num < 64) { + feature->feature_num < SMU_FEATURE_NUM_DEFAULT) { ret = -EINVAL; goto failed; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 4de1778ea6b3..08179840697e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -58,17 +58,19 @@ #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ - FEATURE_MASK(FEATURE_GFX_DPM_BIT)) +static const struct smu_feature_bits vangogh_dpm_features = { + .bits = { + 
SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT) + } +}; static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), @@ -504,7 +506,7 @@ static bool vangogh_is_dpm_running(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; /* we need to re-init after suspend so return false */ if (adev->in_suspend) @@ -515,7 +517,8 @@ static bool vangogh_is_dpm_running(struct smu_context *smu) if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + vangogh_dpm_features.bits); } static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5346b60b09b9..31e21ff8859a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1434,11 +1434,11 @@ static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state) } static int renoir_get_enabled_mask(struct smu_context *smu, - uint64_t *feature_mask) + struct smu_feature_bits *feature_mask) { if (!feature_mask) return -EINVAL; - memset(feature_mask, 0xff, sizeof(*feature_mask)); + smu_feature_bits_fill(feature_mask); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 3b6a34644a92..ad23682217ee 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -61,15 +61,18 @@ [smu_feature] = {1, (aldebaran_feature)} #define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_DATA_CALCULATIONS) | \ - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_LCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_XGMI_BIT) | \ - FEATURE_MASK(FEATURE_DPM_VCN_BIT)) +static const struct smu_feature_bits aldebaran_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATIONS), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_VCN_BIT) + } +}; #define smnPCIE_ESM_CTRL 0x111003D0 @@ -1395,12 +1398,13 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ static bool aldebaran_is_dpm_running(struct smu_context *smu) { int ret; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + aldebaran_dpm_features.bits); } static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 3b0aa6a2e78e..63a65ea802fd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -762,7 +762,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu) uint32_t feature_mask[2]; if (smu_feature_list_is_empty(smu, 
SMU_FEATURE_LIST_ALLOWED) || - feature->feature_num < 64) + feature->feature_num < SMU_FEATURE_NUM_DEFAULT) return -EINVAL; smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 9c4298736b28..468d51f5f918 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -59,14 +59,16 @@ #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) +static const struct smu_feature_bits smu_v13_0_0_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT) + } +}; #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 @@ -689,13 +691,14 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v13_0_0_dpm_features.bits); } static int smu_v13_0_0_system_features_control(struct smu_context *smu, @@ -2612,21 +2615,11 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, static bool 
smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - u32 smu_version; - int ret; /* SRIOV does not support SMU mode1 reset */ if (amdgpu_sriov_vf(adev)) return false; - /* PMFW support is available since 78.41 */ - ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); - if (ret) - return false; - - if (smu_version < 0x004e2900) - return false; - return true; } @@ -2775,13 +2768,7 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, switch (mp1_state) { case PP_MP1_STATE_UNLOAD: - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_PrepareMp1ForUnload, - 0x55, NULL); - - if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) - ret = smu_v13_0_disable_pmfw_state(smu); - + ret = smu_cmn_set_mp1_state(smu, mp1_state); break; default: /* Ignore others */ @@ -2825,8 +2812,9 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu) /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */ smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, ¶m); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_Mode1Reset, param, NULL); + ret = smu_cmn_send_debug_smc_msg_with_param(smu, + DEBUGSMC_MSG_Mode1Reset, param); + break; case IP_VERSION(13, 0, 10): diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index 9a34e5460b35..f2a6ecb64c03 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -52,10 +52,13 @@ #define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \ [smu_feature] = { 1, (smu_13_0_12_feature) } -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE \ - (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \ - FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK)) +static const struct smu_feature_bits smu_v13_0_12_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATION), + 
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK) + } +}; #define NUM_JPEG_RINGS_FW 10 #define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \ @@ -199,14 +202,14 @@ void smu_v13_0_12_tables_fini(struct smu_context *smu) } static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu, - uint64_t *feature_mask) + struct smu_feature_bits *feature_mask) { int ret; ret = smu_cmn_get_enabled_mask(smu, feature_mask); if (ret == -EIO) { - *feature_mask = 0; + smu_feature_bits_clearall(feature_mask); ret = 0; } @@ -372,14 +375,15 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) bool smu_v13_0_12_is_dpm_running(struct smu_context *smu) { int ret; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v13_0_12_dpm_features.bits); } int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, @@ -819,6 +823,9 @@ ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp idx++; } + xcp_metrics->accumulation_counter = metrics->AccumulationCounter; + xcp_metrics->firmware_timestamp = metrics->Timestamp; + return sizeof(*xcp_metrics); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 41c61362f7fc..75b90ac0c29c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -52,24 +52,25 @@ #define mmMP1_SMN_C2PMSG_90 0x029a #define mmMP1_SMN_C2PMSG_90_BASE_IDX 1 -#define FEATURE_MASK(feature) (1ULL << feature) - #define SMU_13_0_4_UMD_PSTATE_GFXCLK 938 #define SMU_13_0_4_UMD_PSTATE_SOCCLK 938 #define SMU_13_0_4_UMD_PSTATE_FCLK 1875 -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ - 
FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_ISP_DPM_BIT) | \ - FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \ - FEATURE_MASK(FEATURE_GFX_DPM_BIT)) +static const struct smu_feature_bits smu_v13_0_4_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_IPU_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT) + } +}; static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), @@ -212,14 +213,15 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu) static bool smu_v13_0_4_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v13_0_4_dpm_features.bits); } static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index e4be727789c0..8ee5002e3d6b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -51,17 +51,19 @@ #define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4) #define 
mmMP1_C2PMSG_33_BASE_IDX 0 -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)| \ - FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)| \ - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT)) +static const struct smu_feature_bits smu_v13_0_5_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT) + } +}; static struct cmn2asic_msg_mapping smu_v13_0_5_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), @@ -230,14 +232,15 @@ static int smu_v13_0_5_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) static bool smu_v13_0_5_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v13_0_5_dpm_features.bits); } static int smu_v13_0_5_mode_reset(struct smu_context *smu, int type) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index bd893e95515f..07592e285cf5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -76,12 +76,18 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); 
[smu_feature] = { 1, (smu_13_0_6_feature) } #define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE \ - (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \ - FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \ - FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \ - FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \ - FEATURE_MASK(FEATURE_DPM_VCN)) +static const struct smu_feature_bits smu_v13_0_6_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATION), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LCLK), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_VCN) + } +}; #define smnPCIE_ESM_CTRL 0x93D0 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288 @@ -2266,14 +2272,14 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, } static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu, - uint64_t *feature_mask) + struct smu_feature_bits *feature_mask) { int ret; ret = smu_cmn_get_enabled_mask(smu, feature_mask); if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) { - *feature_mask = 0; + smu_feature_bits_clearall(feature_mask); ret = 0; } @@ -2283,7 +2289,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu, static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu) { int ret; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) return smu_v13_0_12_is_dpm_running(smu); @@ -2293,7 +2299,8 @@ static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu) if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v13_0_6_dpm_features.bits); } static int smu_v13_0_6_request_i2c_xfer(struct 
smu_context *smu, @@ -2557,9 +2564,10 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; int version = smu_v13_0_6_get_metrics_version(smu); struct smu_v13_0_6_partition_metrics *xcp_metrics; - MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; + struct smu_table_context *smu_table = &smu->smu_table; struct amdgpu_device *adev = smu->adev; int ret, inst, i, j, k, idx; + MetricsTableV0_t *metrics_v0; MetricsTableV1_t *metrics_v1; MetricsTableV2_t *metrics_v2; struct amdgpu_xcp *xcp; @@ -2579,22 +2587,20 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table; smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1); - metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); - if (!metrics_v0) - return -ENOMEM; - - ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); + ret = smu_v13_0_6_get_metrics_table(smu, NULL, false); if (ret) return ret; + metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) return smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0); - metrics_v1 = (MetricsTableV1_t *)metrics_v0; - metrics_v2 = (MetricsTableV2_t *)metrics_v0; + metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; + metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); @@ -2662,6 +2668,8 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, idx++; } } + xcp_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version); + xcp_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); return sizeof(*xcp_metrics); } @@ -2670,21 +2678,21 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table { struct 
smu_v13_0_6_gpu_metrics *gpu_metrics; int version = smu_v13_0_6_get_metrics_version(smu); - MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; + struct smu_table_context *smu_table = &smu->smu_table; struct amdgpu_device *adev = smu->adev; int ret = 0, xcc_id, inst, i, j; + MetricsTableV0_t *metrics_v0; MetricsTableV1_t *metrics_v1; MetricsTableV2_t *metrics_v2; u16 link_width_level; u8 num_jpeg_rings; bool per_inst; - metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); + ret = smu_v13_0_6_get_metrics_table(smu, NULL, false); if (ret) return ret; - metrics_v2 = (MetricsTableV2_t *)metrics_v0; + metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; gpu_metrics = (struct smu_v13_0_6_gpu_metrics *)smu_driver_table_ptr( smu, SMU_DRIVER_TABLE_GPU_METRICS); @@ -2695,8 +2703,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table goto fill; } - metrics_v1 = (MetricsTableV1_t *)metrics_v0; - metrics_v2 = (MetricsTableV2_t *)metrics_v0; + metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; + metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; gpu_metrics->temperature_hotspot = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index 0588a5aa952d..ffb06564f830 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -140,7 +140,7 @@ extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv; SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \ SMU_MTYPE(U64), system_clock_counter); \ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ - SMU_MTYPE(U32), accumulation_counter); \ + SMU_MTYPE(U64), accumulation_counter); \ SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ SMU_MTYPE(U32), prochot_residency_acc); \ 
SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ @@ -259,7 +259,11 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, SMU_13_0_6_MAX_XCC); \ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ - SMU_13_0_6_MAX_XCC); + SMU_13_0_6_MAX_XCC); \ + SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), accumulation_counter); \ + SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \ + SMU_MTYPE(U64), firmware_timestamp); DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics, SMU_13_0_6_PARTITION_METRICS_FIELDS); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 415766dbfe6c..a6c22ae86c98 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -59,19 +59,32 @@ #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) +static const struct smu_feature_bits smu_v13_0_7_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT) + } +}; #define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 +#define mmMP1_SMN_C2PMSG_75 0x028b +#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0 + +#define mmMP1_SMN_C2PMSG_53 0x0275 +#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0 + +#define 
mmMP1_SMN_C2PMSG_54 0x0276 +#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0 + +#define DEBUGSMC_MSG_Mode1Reset 2 + #define PP_OD_FEATURE_GFXCLK_FMIN 0 #define PP_OD_FEATURE_GFXCLK_FMAX 1 #define PP_OD_FEATURE_UCLK_FMIN 2 @@ -697,13 +710,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v13_0_7_dpm_features.bits); } static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) @@ -2731,6 +2745,36 @@ static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu, return ret; } +static int smu_v13_0_7_mode1_reset(struct smu_context *smu) +{ + int ret; + + ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset); + if (!ret) { + /* disable mmio access while doing mode 1 reset*/ + smu->adev->no_hw_access = true; + /* ensure no_hw_access is globally visible before any MMIO */ + smp_mb(); + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); + } + + return ret; +} + +static void smu_v13_0_7_init_msg_ctl(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + struct smu_msg_ctl *ctl = &smu->msg_ctl; + + smu_v13_0_init_msg_ctl(smu, smu_v13_0_7_message_map); + + /* Set up debug mailbox registers */ + ctl->config.debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53); + ctl->config.debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75); + ctl->config.debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54); + ctl->flags |= SMU_MSG_CTL_DEBUG_MAILBOX; +} + static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .init_allowed_features = smu_v13_0_7_init_allowed_features, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, @@ -2792,7 +2836,7 @@ static const struct 
pptable_funcs smu_v13_0_7_ppt_funcs = { .baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, - .mode1_reset = smu_v13_0_mode1_reset, + .mode1_reset = smu_v13_0_7_mode1_reset, .set_mp1_state = smu_v13_0_7_set_mp1_state, .set_df_cstate = smu_v13_0_7_set_df_cstate, .gpo_control = smu_v13_0_gpo_control, @@ -2811,5 +2855,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) smu->pwr_src_map = smu_v13_0_7_pwr_src_map; smu->workload_map = smu_v13_0_7_workload_map; smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION; - smu_v13_0_init_msg_ctl(smu, smu_v13_0_7_message_map); + smu_v13_0_7_init_msg_ctl(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 7f70f79c3b2f..f9789b1fcbf8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -55,17 +55,19 @@ #define SMU_13_0_1_UMD_PSTATE_SOCCLK 678 #define SMU_13_0_1_UMD_PSTATE_FCLK 1800 -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ - FEATURE_MASK(FEATURE_GFX_DPM_BIT)) +static const struct smu_feature_bits yellow_carp_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), + 
SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT) + } +}; static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), @@ -257,14 +259,15 @@ static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) static bool yellow_carp_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + yellow_carp_dpm_features.bits); } static int yellow_carp_post_smu_init(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index cabbd234c6e2..7dc6687c3693 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -747,7 +747,7 @@ int smu_v14_0_set_allowed_mask(struct smu_context *smu) uint32_t feature_mask[2]; if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) || - feature->feature_num < 64) + feature->feature_num < SMU_FEATURE_NUM_DEFAULT) return -EINVAL; smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index a4e376e8328c..dbdf7653cc53 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -72,19 +72,21 @@ #define SMU_14_0_4_UMD_PSTATE_GFXCLK 938 #define SMU_14_0_4_UMD_PSTATE_SOCCLK 938 -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ - 
FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ - FEATURE_MASK(FEATURE_ISP_DPM_BIT)| \ - FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \ - FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ - FEATURE_MASK(FEATURE_VPE_DPM_BIT)) +static const struct smu_feature_bits smu_v14_0_0_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_IPU_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_VPE_DPM_BIT) + } +}; enum smu_mall_pg_config { SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0, @@ -470,14 +472,15 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu, static bool smu_v14_0_0_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v14_0_0_dpm_features.bits); } static int smu_v14_0_0_set_watermarks_table(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 3c351ee41e68..becfd356b4e7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -56,13 +56,13 @@ #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ - 
FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_FCLK_BIT)) +static const struct smu_feature_bits smu_v14_0_2_dpm_features = { + .bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT) } +}; #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 #define DEBUGSMC_MSG_Mode1Reset 2 @@ -589,13 +589,14 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v14_0_2_dpm_features.bits); } static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c index d77eaac556d9..a2f446d38be8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c @@ -716,7 +716,7 @@ int smu_v15_0_set_allowed_mask(struct smu_context *smu) uint32_t feature_mask[2]; if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) || - feature->feature_num < 64) + feature->feature_num < SMU_FEATURE_NUM_DEFAULT) return -EINVAL; smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask); @@ -1726,14 +1726,6 @@ int smu_v15_0_set_gfx_power_up_by_imu(struct smu_context *smu) return ret; } -int smu_v15_0_set_default_dpm_tables(struct smu_context *smu) -{ - struct smu_table_context *smu_table = &smu->smu_table; - - return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, - smu_table->clocks_table, false); -} - int smu_v15_0_od_edit_dpm_table(struct smu_context *smu, enum 
PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c index b48444706c1e..660335d7bda9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c @@ -52,37 +52,32 @@ #define mmMP1_SMN_C2PMSG_32 0x0060 #define mmMP1_SMN_C2PMSG_32_BASE_IDX 1 -/* MALLPowerController message arguments (Defines for the Cache mode control) */ -#define SMU_MALL_PMFW_CONTROL 0 -#define SMU_MALL_DRIVER_CONTROL 1 +#define mmMP1_SMN_C2PMSG_33 0x0061 +#define mmMP1_SMN_C2PMSG_33_BASE_IDX 1 -/* - * MALLPowerState message arguments - * (Defines for the Allocate/Release Cache mode if in driver mode) - */ -#define SMU_MALL_EXIT_PG 0 -#define SMU_MALL_ENTER_PG 1 - -#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON +#define mmMP1_SMN_C2PMSG_34 0x0062 +#define mmMP1_SMN_C2PMSG_34_BASE_IDX 1 #define SMU_15_0_UMD_PSTATE_GFXCLK 700 #define SMU_15_0_UMD_PSTATE_SOCCLK 678 #define SMU_15_0_UMD_PSTATE_FCLK 1800 -#define FEATURE_MASK(feature) (1ULL << feature) -#define SMC_DPM_FEATURE ( \ - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ - FEATURE_MASK(FEATURE_ISP_DPM_BIT)| \ - FEATURE_MASK(FEATURE_NPU_DPM_BIT) | \ - FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ - FEATURE_MASK(FEATURE_VPE_DPM_BIT)) +static const struct smu_feature_bits smu_v15_0_0_dpm_features = { + .bits = { + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), + 
SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_NPU_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT), + SMU_FEATURE_BIT_INIT(FEATURE_VPE_DPM_BIT) + } +}; enum smu_mall_pg_config { SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0, @@ -238,6 +233,102 @@ static int smu_v15_0_0_system_features_control(struct smu_context *smu, bool en) return ret; } +static int smu_v15_0_0_update_table(struct smu_context *smu, + enum smu_table_id table_index, + int argument, + void *table_data, + bool drv2smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct amdgpu_device *adev = smu->adev; + struct smu_table *table = &smu_table->driver_table; + int table_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_TABLE, + table_index); + uint64_t address; + uint32_t table_size; + int ret; + struct smu_msg_ctl *ctl = &smu->msg_ctl; + + if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0) + return -EINVAL; + + table_size = smu_table->tables[table_index].size; + + if (drv2smu) { + memcpy(table->cpu_addr, table_data, table_size); + /* + * Flush hdp cache: to guard the content seen by + * GPU is consistent with CPU. + */ + amdgpu_hdp_flush(adev, NULL); + } + + address = table->mc_address; + + struct smu_msg_args args = { + .msg = drv2smu ? 
+ SMU_MSG_TransferTableDram2Smu : + SMU_MSG_TransferTableSmu2Dram, + .num_args = 3, + .num_out_args = 0, + }; + + args.args[0] = table_id; + args.args[1] = (uint32_t)lower_32_bits(address); + args.args[2] = (uint32_t)upper_32_bits(address); + + ret = ctl->ops->send_msg(ctl, &args); + + if (ret) + return ret; + + if (!drv2smu) { + amdgpu_hdp_invalidate(adev, NULL); + memcpy(table_data, table->cpu_addr, table_size); + } + + return 0; +} + +static int smu_v15_0_0_set_default_dpm_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + return smu_v15_0_0_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, + smu_table->clocks_table, false); +} + +static int smu_v15_0_0_get_metrics_table(struct smu_context *smu, + void *metrics_table, + bool bypass_cache) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint32_t table_size = + smu_table->tables[SMU_TABLE_SMU_METRICS].size; + int ret; + + if (bypass_cache || + !smu_table->metrics_time || + time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { + ret = smu_v15_0_0_update_table(smu, + SMU_TABLE_SMU_METRICS, + 0, + smu_table->metrics_table, + false); + if (ret) { + dev_info(smu->adev->dev, "Failed to export SMU15_0_0 metrics table!\n"); + return ret; + } + smu_table->metrics_time = jiffies; + } + + if (metrics_table) + memcpy(metrics_table, smu_table->metrics_table, table_size); + + return 0; +} + static int smu_v15_0_0_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) @@ -247,7 +338,7 @@ static int smu_v15_0_0_get_smu_metrics_data(struct smu_context *smu, SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; - ret = smu_cmn_get_metrics_table(smu, NULL, false); + ret = smu_v15_0_0_get_metrics_table(smu, NULL, false); if (ret) return ret; @@ -441,17 +532,42 @@ static int smu_v15_0_0_read_sensor(struct smu_context *smu, return ret; } +static int smu_v15_0_0_get_enabled_mask(struct smu_context *smu, + struct 
smu_feature_bits *feature_mask) +{ + int ret; + struct smu_msg_ctl *ctl = &smu->msg_ctl; + + if (!feature_mask) + return -EINVAL; + + struct smu_msg_args args = { + .msg = SMU_MSG_GetEnabledSmuFeatures, + .num_args = 0, + .num_out_args = 2, + }; + + ret = ctl->ops->send_msg(ctl, &args); + + if (!ret) + smu_feature_bits_from_arr32(feature_mask, args.out_args, + SMU_FEATURE_NUM_DEFAULT); + + return ret; +} + static bool smu_v15_0_0_is_dpm_running(struct smu_context *smu) { int ret = 0; - uint64_t feature_enabled; + struct smu_feature_bits feature_enabled; - ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); + ret = smu_v15_0_0_get_enabled_mask(smu, &feature_enabled); if (ret) return false; - return !!(feature_enabled & SMC_DPM_FEATURE); + return smu_feature_bits_test_mask(&feature_enabled, + smu_v15_0_0_dpm_features.bits); } static int smu_v15_0_0_set_watermarks_table(struct smu_context *smu, @@ -521,7 +637,7 @@ static ssize_t smu_v15_0_0_get_gpu_metrics(struct smu_context *smu, SmuMetrics_t metrics; int ret = 0; - ret = smu_cmn_get_metrics_table(smu, &metrics, true); + ret = smu_v15_0_0_get_metrics_table(smu, &metrics, false); if (ret) return ret; @@ -976,26 +1092,21 @@ static int smu_v15_0_0_set_soft_freq_limited_range(struct smu_context *smu, switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - msg_set_min = SMU_MSG_SetHardMinGfxClk; + msg_set_min = SMU_MSG_SetSoftMinGfxclk; msg_set_max = SMU_MSG_SetSoftMaxGfxClk; break; case SMU_FCLK: - msg_set_min = SMU_MSG_SetHardMinFclkByFreq; + msg_set_min = SMU_MSG_SetSoftMinFclk; msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq; break; case SMU_SOCCLK: - msg_set_min = SMU_MSG_SetHardMinSocclkByFreq; + msg_set_min = SMU_MSG_SetSoftMinSocclkByFreq; msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq; break; case SMU_VCLK: case SMU_DCLK: - msg_set_min = SMU_MSG_SetHardMinVcn0; - msg_set_max = SMU_MSG_SetSoftMaxVcn0; - break; - case SMU_VCLK1: - case SMU_DCLK1: - msg_set_min = SMU_MSG_SetHardMinVcn1; - msg_set_max = 
SMU_MSG_SetSoftMaxVcn1; + msg_set_min = SMU_MSG_SetSoftMinVcn; + msg_set_max = SMU_MSG_SetSoftMaxVcn; break; default: return -EINVAL; @@ -1312,14 +1423,13 @@ static const struct pptable_funcs smu_v15_0_0_ppt_funcs = { .system_features_control = smu_v15_0_0_system_features_control, .dpm_set_vcn_enable = smu_v15_0_set_vcn_enable, .dpm_set_jpeg_enable = smu_v15_0_set_jpeg_enable, - .set_default_dpm_table = smu_v15_0_set_default_dpm_tables, + .set_default_dpm_table = smu_v15_0_0_set_default_dpm_tables, .read_sensor = smu_v15_0_0_read_sensor, .is_dpm_running = smu_v15_0_0_is_dpm_running, .set_watermarks_table = smu_v15_0_0_set_watermarks_table, .get_gpu_metrics = smu_v15_0_0_get_gpu_metrics, - .get_enabled_mask = smu_cmn_get_enabled_mask, + .get_enabled_mask = smu_v15_0_0_get_enabled_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, - .set_driver_table_location = smu_v15_0_set_driver_table_location, .gfx_off_control = smu_v15_0_gfx_off_control, .mode2_reset = smu_v15_0_0_mode2_reset, .get_dpm_ultimate_freq = smu_v15_0_common_get_dpm_ultimate_freq, @@ -1344,7 +1454,9 @@ static void smu_v15_0_0_init_msg_ctl(struct smu_context *smu) ctl->config.msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_30); ctl->config.resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_31); ctl->config.arg_regs[0] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_32); - ctl->config.num_arg_regs = 1; + ctl->config.arg_regs[1] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_33); + ctl->config.arg_regs[2] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_34); + ctl->config.num_arg_regs = 3; ctl->ops = &smu_msg_v1_ops; ctl->default_timeout = adev->usec_timeout * 20; ctl->message_map = smu_v15_0_0_message_map; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 9bb7e3760c0f..6fd50c2fd20e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -690,7 +690,7 @@ int smu_cmn_feature_is_supported(struct smu_context *smu, } 
static int __smu_get_enabled_features(struct smu_context *smu, - uint64_t *enabled_features) + struct smu_feature_bits *enabled_features) { return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features); } @@ -699,7 +699,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) { struct amdgpu_device *adev = smu->adev; - uint64_t enabled_features; + struct smu_feature_bits enabled_features; int feature_id; if (__smu_get_enabled_features(smu, &enabled_features)) { @@ -712,7 +712,8 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu, * enabled. Also considering they have no feature_map available, the * check here can avoid unwanted feature_map check below. */ - if (enabled_features == ULLONG_MAX) + if (smu_feature_bits_full(&enabled_features, + smu->smu_feature.feature_num)) return 1; feature_id = smu_cmn_to_asic_specific_index(smu, @@ -721,7 +722,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu, if (feature_id < 0) return 0; - return test_bit(feature_id, (unsigned long *)&enabled_features); + return smu_feature_bits_is_set(&enabled_features, feature_id); } bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, @@ -763,45 +764,39 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, } int smu_cmn_get_enabled_mask(struct smu_context *smu, - uint64_t *feature_mask) + struct smu_feature_bits *feature_mask) { - uint32_t *feature_mask_high; - uint32_t *feature_mask_low; + uint32_t features[2]; int ret = 0, index = 0; if (!feature_mask) return -EINVAL; - feature_mask_low = &((uint32_t *)feature_mask)[0]; - feature_mask_high = &((uint32_t *)feature_mask)[1]; - index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetEnabledSmuFeatures); if (index > 0) { - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetEnabledSmuFeatures, - 0, - feature_mask_low); + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GetEnabledSmuFeatures, 0, &features[0]); if (ret) return ret; - ret = 
smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetEnabledSmuFeatures, - 1, - feature_mask_high); + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GetEnabledSmuFeatures, 1, &features[1]); } else { - ret = smu_cmn_send_smc_msg(smu, - SMU_MSG_GetEnabledSmuFeaturesHigh, - feature_mask_high); + ret = smu_cmn_send_smc_msg( + smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &features[1]); if (ret) return ret; - ret = smu_cmn_send_smc_msg(smu, - SMU_MSG_GetEnabledSmuFeaturesLow, - feature_mask_low); + ret = smu_cmn_send_smc_msg( + smu, SMU_MSG_GetEnabledSmuFeaturesLow, &features[0]); } + if (!ret) + smu_feature_bits_from_arr32(feature_mask, features, + SMU_FEATURE_NUM_DEFAULT); + return ret; } @@ -886,7 +881,8 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, char *buf) { int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)]; - uint64_t feature_mask; + struct smu_feature_bits feature_mask; + uint32_t features[2]; int i, feature_index; uint32_t count = 0; size_t size = 0; @@ -894,8 +890,10 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, if (__smu_get_enabled_features(smu, &feature_mask)) return 0; - size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n", - upper_32_bits(feature_mask), lower_32_bits(feature_mask)); + /* TBD: Need to handle for > 64 bits */ + smu_feature_bits_to_arr32(&feature_mask, features, 64); + size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n", + features[1], features[0]); memset(sort_feature, -1, sizeof(sort_feature)); @@ -912,16 +910,18 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n", "No", "Feature", "Bit", "State"); - for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) { + for (feature_index = 0; feature_index < smu->smu_feature.feature_num; + feature_index++) { if (sort_feature[feature_index] < 0) continue; - size += sysfs_emit_at(buf, size, "%02d. 
%-20s (%2d) : %s\n", - count++, - smu_get_feature_name(smu, sort_feature[feature_index]), - feature_index, - !!test_bit(feature_index, (unsigned long *)&feature_mask) ? - "enabled" : "disabled"); + size += sysfs_emit_at( + buf, size, "%02d. %-20s (%2d) : %s\n", count++, + smu_get_feature_name(smu, sort_feature[feature_index]), + feature_index, + smu_feature_bits_is_set(&feature_mask, feature_index) ? + "enabled" : + "disabled"); } return size; @@ -931,7 +931,8 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) { int ret = 0; - uint64_t feature_mask; + struct smu_feature_bits feature_mask; + uint64_t feature_mask_u64; uint64_t feature_2_enabled = 0; uint64_t feature_2_disabled = 0; @@ -939,8 +940,9 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu, if (ret) return ret; - feature_2_enabled = ~feature_mask & new_mask; - feature_2_disabled = feature_mask & ~new_mask; + feature_mask_u64 = *(uint64_t *)feature_mask.bits; + feature_2_enabled = ~feature_mask_u64 & new_mask; + feature_2_disabled = feature_mask_u64 & ~new_mask; if (feature_2_enabled) { ret = smu_cmn_feature_update_enable_state(smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 92ad2ece7a36..b7bfddc65fb2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -141,7 +141,7 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); int smu_cmn_get_enabled_mask(struct smu_context *smu, - uint64_t *feature_mask); + struct smu_feature_bits *feature_mask); uint64_t smu_cmn_get_indep_throttler_status( const unsigned long dep_status, diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c index 29df98948703..210fbd8851a6 100644 --- a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c @@ -299,7 +299,7 @@ static int aca_parse_xgmi_bank(struct 
ras_core_context *ras_core, count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); if (bank->ecc_type == RAS_ERR_TYPE__UE) { - if (ext_error_code != 0 && ext_error_code != 9) + if (ext_error_code != 0 && ext_error_code != 1 && ext_error_code != 9) count = 0ULL; ecc->ue_count = count; } else if (bank->ecc_type == RAS_ERR_TYPE__CE) { diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index aa9a0b60e727..a799447a7575 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1150,7 +1150,7 @@ static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm, addr->dir); else if (dpagemap && dpagemap->ops->device_unmap) dpagemap->ops->device_unmap(dpagemap, - dev, *addr); + dev, addr); i += 1 << addr->order; } diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c index 38eca94f01a1..5af518a93192 100644 --- a/drivers/gpu/drm/drm_pagemap.c +++ b/drivers/gpu/drm/drm_pagemap.c @@ -318,7 +318,7 @@ static void drm_pagemap_migrate_unmap_pages(struct device *dev, struct drm_pagemap_zdd *zdd = page->zone_device_data; struct drm_pagemap *dpagemap = zdd->dpagemap; - dpagemap->ops->device_unmap(dpagemap, dev, pagemap_addr[i]); + dpagemap->ops->device_unmap(dpagemap, dev, &pagemap_addr[i]); } else { dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir); diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c index 68c01932f7b4..e06f324027be 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -96,6 +96,7 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle) if (!pkg->package.count) { DRM_DEBUG_DRIVER("no connection in _DSM\n"); + ACPI_FREE(pkg); return; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 7e022c47e8ac..559cf3bb23fd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ 
-2557,6 +2557,9 @@ bool intel_dp_mode_valid_with_dsc(struct intel_connector *connector, if (min_bpp_x16 <= 0 || min_bpp_x16 > max_bpp_x16) return false; + if (dsc_slice_count == 0) + return false; + return is_bw_sufficient_for_dsc_config(intel_dp, link_clock, lane_count, mode_clock, mode_hdisplay, @@ -2665,6 +2668,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, bool dsc, struct link_config_limits *limits) { + struct intel_display *display = to_intel_display(intel_dp); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); struct intel_connector *connector = to_intel_connector(conn_state->connector); @@ -2677,8 +2681,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits->min_lane_count = intel_dp_min_lane_count(intel_dp); limits->max_lane_count = intel_dp_max_lane_count(intel_dp); - limits->pipe.min_bpp = intel_dp_in_hdr_mode(conn_state) ? 30 : - intel_dp_min_bpp(crtc_state->output_format); + limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format); if (is_mst) { /* * FIXME: If all the streams can't fit into the link with their @@ -2694,6 +2697,19 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, respect_downstream_limits); } + if (!dsc && intel_dp_in_hdr_mode(conn_state)) { + if (intel_dp_supports_dsc(intel_dp, connector, crtc_state) && + limits->pipe.max_bpp >= 30) + limits->pipe.min_bpp = max(limits->pipe.min_bpp, 30); + else + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] Can't force 30 bpp for HDR (pipe bpp: %d-%d DSC-support: %s)\n", + connector->base.base.id, connector->base.name, + limits->pipe.min_bpp, limits->pipe.max_bpp, + str_yes_no(intel_dp_supports_dsc(intel_dp, connector, + crtc_state))); + } + if (dsc && !intel_dp_dsc_compute_pipe_bpp_limits(connector, limits)) return false; @@ -2825,10 +2841,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, } drm_dbg_kms(display->drm, - "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available 
%d\n", + "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " HDR %s link rate required %d available %d\n", pipe_config->lane_count, pipe_config->port_clock, pipe_config->pipe_bpp, FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16), + str_yes_no(intel_dp_in_hdr_mode(conn_state)), intel_dp_config_required_rate(pipe_config), intel_dp_max_link_data_rate(intel_dp, pipe_config->port_clock, diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index d2e16b79d6be..1abbdd426e58 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -239,7 +239,7 @@ static struct intel_quirk intel_quirks[] = { { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, /* Dell XPS 13 7390 2-in-1 */ - { 0x8a12, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 }, + { 0x8a52, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 }, }; static const struct intel_dpcd_quirk intel_dpcd_quirks[] = { diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 9deb91970d4d..f12227145ef0 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2925,6 +2925,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, max_sclk = 60000; max_mclk = 80000; } + if ((rdev->pdev->device == 0x666f) && + (rdev->pdev->revision == 0x00)) { + max_sclk = 80000; + max_mclk = 95000; + } } else if (rdev->family == CHIP_OLAND) { if ((rdev->pdev->revision == 0xC7) || (rdev->pdev->revision == 0x80) || diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index e9180b01a4e4..5dd8fcacbb80 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -1941,7 +1941,7 @@ static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf) int err = 0; int idx; - if (!drm_dev_enter(&xe->drm, &idx)) + if (xe_device_wedged(xe) || !drm_dev_enter(&xe->drm, &idx)) return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); ret = xe_bo_cpu_fault_fastpath(vmf, xe, 
bo, needs_rpm); diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h index fed57be0b90e..f3683bc7eb90 100644 --- a/drivers/gpu/drm/xe/xe_configfs.h +++ b/drivers/gpu/drm/xe/xe_configfs.h @@ -21,9 +21,11 @@ bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev); bool xe_configfs_media_gt_allowed(struct pci_dev *pdev); u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev); bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev); -u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class, +u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, + enum xe_engine_class class, const u32 **cs); -u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class, +u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, + enum xe_engine_class class, const u32 **cs); #ifdef CONFIG_PCI_IOV unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev); @@ -37,9 +39,11 @@ static inline bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev) { return static inline bool xe_configfs_media_gt_allowed(struct pci_dev *pdev) { return true; } static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; } static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; } -static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class, +static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, + enum xe_engine_class class, const u32 **cs) { return 0; } -static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class, +static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, + enum xe_engine_class class, const u32 **cs) { return 0; } static inline unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev) { return UINT_MAX; } #endif diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c index 91ac22ef5703..fe944687728c 
100644 --- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c +++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c @@ -191,7 +191,7 @@ int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt) struct xe_device *xe = gt_to_xe(gt); int err; - if (!xe_gt_ccs_mode_enabled(gt)) + if (!xe_gt_ccs_mode_enabled(gt) || IS_SRIOV_VF(xe)) return 0; err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs); diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index baf277955b33..0fd4d4f1014a 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -48,7 +48,7 @@ enum xe_hwmon_channel { CHANNEL_MCTRL, CHANNEL_PCIE, CHANNEL_VRAM_N, - CHANNEL_VRAM_N_MAX = CHANNEL_VRAM_N + MAX_VRAM_CHANNELS, + CHANNEL_VRAM_N_MAX = CHANNEL_VRAM_N + MAX_VRAM_CHANNELS - 1, CHANNEL_MAX, }; @@ -264,7 +264,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg return BMG_PACKAGE_TEMPERATURE; else if (channel == CHANNEL_VRAM) return BMG_VRAM_TEMPERATURE; - else if (in_range(channel, CHANNEL_VRAM_N, CHANNEL_VRAM_N_MAX)) + else if (in_range(channel, CHANNEL_VRAM_N, MAX_VRAM_CHANNELS)) return BMG_VRAM_TEMPERATURE_N(channel - CHANNEL_VRAM_N); } else if (xe->info.platform == XE_DG2) { if (channel == CHANNEL_PKG) @@ -1427,7 +1427,7 @@ static int xe_hwmon_read_label(struct device *dev, *str = "mctrl"; else if (channel == CHANNEL_PCIE) *str = "pcie"; - else if (in_range(channel, CHANNEL_VRAM_N, CHANNEL_VRAM_N_MAX)) + else if (in_range(channel, CHANNEL_VRAM_N, MAX_VRAM_CHANNELS)) *str = hwmon->temp.vram_label[channel - CHANNEL_VRAM_N]; return 0; case hwmon_power: diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index bcb6674b7dac..a1a05c68dc7d 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -256,11 +256,11 @@ u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg) struct xe_reg reg_udw = { .addr = reg.addr + 0x4 }; u32 ldw, udw, oldudw, retries; - reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr); - reg_udw.addr 
= xe_mmio_adjusted_addr(mmio, reg_udw.addr); - - /* we shouldn't adjust just one register address */ - xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4); + /* + * The two dwords of a 64-bit register can never straddle the offset + * adjustment cutoff. + */ + xe_tile_assert(mmio->tile, !in_range(mmio->adj_limit, reg.addr + 1, 7)); oldudw = xe_mmio_read32(mmio, reg_udw); for (retries = 5; retries; --retries) { diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h index 1c75f38ca393..79cb9639c0f3 100644 --- a/drivers/gpu/drm/xe/xe_module.h +++ b/drivers/gpu/drm/xe/xe_module.h @@ -12,7 +12,7 @@ struct xe_modparam { bool force_execlist; bool probe_display; - u32 force_vram_bar_size; + int force_vram_bar_size; int guc_log_level; char *guc_firmware_path; char *huc_firmware_path; diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 3805be561751..5c0b3224f20d 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -553,6 +553,12 @@ static int read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u struct xe_gt *gt __free(kfree) = NULL; int err; + /* Don't try to read media ver if media GT is not allowed */ + if (type == GMDID_MEDIA && !xe_configfs_media_gt_allowed(to_pci_dev(xe->drm.dev))) { + *ver = *revid = 0; + return 0; + } + gt = kzalloc(sizeof(*gt), GFP_KERNEL); if (!gt) return -ENOMEM; diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c index 3d140506ba36..82a1055985ba 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c @@ -349,18 +349,33 @@ static const struct attribute_group *xe_sriov_vf_attr_groups[] = { /* no user serviceable parts below */ -static struct kobject *create_xe_sriov_kobj(struct xe_device *xe, unsigned int vfid) +static void action_put_kobject(void *arg) +{ + struct kobject *kobj = arg; + + kobject_put(kobj); +} + +static struct kobject *create_xe_sriov_kobj(struct xe_device 
*xe, unsigned int vfid, + const struct kobj_type *ktype) { struct xe_sriov_kobj *vkobj; + int err; xe_sriov_pf_assert_vfid(xe, vfid); vkobj = kzalloc(sizeof(*vkobj), GFP_KERNEL); if (!vkobj) - return NULL; + return ERR_PTR(-ENOMEM); vkobj->xe = xe; vkobj->vfid = vfid; + kobject_init(&vkobj->base, ktype); + + err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, &vkobj->base); + if (err) + return ERR_PTR(err); + return &vkobj->base; } @@ -463,28 +478,17 @@ static void pf_sysfs_note(struct xe_device *xe, int err, const char *what) xe_sriov_dbg(xe, "Failed to setup sysfs %s (%pe)\n", what, ERR_PTR(err)); } -static void action_put_kobject(void *arg) -{ - struct kobject *kobj = arg; - - kobject_put(kobj); -} - static int pf_setup_root(struct xe_device *xe) { struct kobject *parent = &xe->drm.dev->kobj; struct kobject *root; int err; - root = create_xe_sriov_kobj(xe, PFID); - if (!root) - return pf_sysfs_error(xe, -ENOMEM, "root obj"); - - err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, root); - if (err) - return pf_sysfs_error(xe, err, "root action"); + root = create_xe_sriov_kobj(xe, PFID, &xe_sriov_dev_ktype); + if (IS_ERR(root)) + return pf_sysfs_error(xe, PTR_ERR(root), "root obj"); - err = kobject_init_and_add(root, &xe_sriov_dev_ktype, parent, "sriov_admin"); + err = kobject_add(root, parent, "sriov_admin"); if (err) return pf_sysfs_error(xe, err, "root init"); @@ -505,20 +509,14 @@ static int pf_setup_tree(struct xe_device *xe) root = xe->sriov.pf.sysfs.root; for (n = 0; n <= totalvfs; n++) { - kobj = create_xe_sriov_kobj(xe, VFID(n)); - if (!kobj) - return pf_sysfs_error(xe, -ENOMEM, "tree obj"); - - err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, root); - if (err) - return pf_sysfs_error(xe, err, "tree action"); + kobj = create_xe_sriov_kobj(xe, VFID(n), &xe_sriov_vf_ktype); + if (IS_ERR(kobj)) + return pf_sysfs_error(xe, PTR_ERR(kobj), "tree obj"); if (n) - err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype, - root, 
"vf%u", n); + err = kobject_add(kobj, root, "vf%u", n); else - err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype, - root, "pf"); + err = kobject_add(kobj, root, "pf"); if (err) return pf_sysfs_error(xe, err, "tree init"); diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 213f0334518a..78f4b2c60670 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -1676,13 +1676,13 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap, static void xe_drm_pagemap_device_unmap(struct drm_pagemap *dpagemap, struct device *dev, - struct drm_pagemap_addr addr) + const struct drm_pagemap_addr *addr) { - if (addr.proto != XE_INTERCONNECT_P2P) + if (addr->proto != XE_INTERCONNECT_P2P) return; - dma_unmap_resource(dev, addr.addr, PAGE_SIZE << addr.order, - addr.dir, DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_resource(dev, addr->addr, PAGE_SIZE << addr->order, + addr->dir, DMA_ATTR_SKIP_CPU_SYNC); } static void xe_pagemap_destroy_work(struct work_struct *work) diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c index add9a6ca2390..52147f5eaaa0 100644 --- a/drivers/gpu/drm/xe/xe_vm_madvise.c +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c @@ -291,8 +291,13 @@ static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madv break; case DRM_XE_MEM_RANGE_ATTR_PAT: { - u16 coh_mode = xe_pat_index_get_coh_mode(xe, args->pat_index.val); + u16 pat_index, coh_mode; + if (XE_IOCTL_DBG(xe, args->pat_index.val >= xe->pat.n_entries)) + return false; + + pat_index = array_index_nospec(args->pat_index.val, xe->pat.n_entries); + coh_mode = xe_pat_index_get_coh_mode(xe, pat_index); if (XE_IOCTL_DBG(xe, !coh_mode)) return false; diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index a991ee2b8781..c7b1bd79ab17 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -548,16 +548,6 @@ static const struct xe_rtp_entry_sr engine_was[] = { 
FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS)) }, - { XE_RTP_NAME("14019988906"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), - FUNC(xe_rtp_match_first_render_or_compute)), - XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD)) - }, - { XE_RTP_NAME("14019877138"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), - FUNC(xe_rtp_match_first_render_or_compute)), - XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT)) - }, { XE_RTP_NAME("14020338487"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), FUNC(xe_rtp_match_first_render_or_compute)), @@ -833,6 +823,14 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS)) }, + { XE_RTP_NAME("14019988906"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD)) + }, + { XE_RTP_NAME("14019877138"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT)) + }, { XE_RTP_NAME("14021490052"), XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(FF_MODE, diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h index 2baf0861f78f..c848f578e3da 100644 --- a/include/drm/drm_pagemap.h +++ b/include/drm/drm_pagemap.h @@ -95,7 +95,7 @@ struct drm_pagemap_ops { */ void (*device_unmap)(struct drm_pagemap *dpagemap, struct device *dev, - struct drm_pagemap_addr addr); + const struct drm_pagemap_addr *addr); /** * @populate_mm: Populate part of the mm with @dpagemap memory, diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 1d34daa0ebcd..ebbd861ef0bc 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -1667,6 +1667,7 @@ struct drm_amdgpu_info_uq_metadata { #define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */ #define 
AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */ #define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */ +#define AMDGPU_FAMILY_GC_11_5_4 154 /* GC 11.5.4 */ #define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */ #if defined(__cplusplus) |
