author    Evan Quan <evan.quan@amd.com>    2021-11-16 13:30:53 +0800
committer Alex Deucher <alexander.deucher@amd.com>    2022-01-14 17:51:14 -0500
commit    837d542a09cd533055423dfca7e621a9c1d13c5b (patch)
tree      ff56aed3ac28491a9eae0a39e5c98ce5e423c985 /drivers/gpu/drm/amd/pm/legacy-dpm
parent    ebfc253335af81db2e40e6e8ed17cd76edf9080f (diff)
drm/amd/pm: relocate the power related headers
Instead of centralizing all headers in the same folder, separate them into different folders and place them alongside the source files that actually need them.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/pm/legacy-dpm')
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/Makefile          32
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h         29
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c        3405
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h         229
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c         218
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c    1080
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h      38
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h          200
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h       127
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c        8153
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h        1022
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c         273
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h   431
13 files changed, 15237 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile
new file mode 100644
index 000000000000..baa4265d1daa
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2021 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+AMD_LEGACYDPM_PATH = ../pm/legacy-dpm
+
+LEGACYDPM_MGR-y = legacy_dpm.o
+
+LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o
+LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o
+
+AMD_LEGACYDPM_POWER = $(addprefix $(AMD_LEGACYDPM_PATH)/,$(LEGACYDPM_MGR-y))
+
+AMD_POWERPLAY_FILES += $(AMD_LEGACYDPM_POWER)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h
new file mode 100644
index 000000000000..2fcc4b60153c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_DPM_H__
+#define __CIK_DPM_H__
+
+extern const struct amdgpu_ip_block_version kv_smu_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
new file mode 100644
index 000000000000..72824ef61edd
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -0,0 +1,3405 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "cikd.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_dpm.h"
+#include "kv_dpm.h"
+#include "gfx_v7_0.h"
+#include <linux/seq_file.h>
+
+#include "smu/smu_7_0_0_d.h"
+#include "smu/smu_7_0_0_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+#include "legacy_dpm.h"
+
+#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define KV_MINIMUM_ENGINE_CLOCK 800
+#define SMC_RAM_END 0x40000
+
+static const struct amd_pm_funcs kv_dpm_funcs;
+
+static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
+static int kv_enable_nb_dpm(struct amdgpu_device *adev,
+ bool enable);
+static void kv_init_graphics_levels(struct amdgpu_device *adev);
+static int kv_calculate_ds_divider(struct amdgpu_device *adev);
+static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
+static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
+static void kv_enable_new_levels(struct amdgpu_device *adev);
+static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_rps);
+static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
+static int kv_set_enabled_levels(struct amdgpu_device *adev);
+static int kv_force_dpm_highest(struct amdgpu_device *adev);
+static int kv_force_dpm_lowest(struct amdgpu_device *adev);
+static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_rps,
+ struct amdgpu_ps *old_rps);
+static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
+ int min_temp, int max_temp);
+static int kv_init_fps_limits(struct amdgpu_device *adev);
+
+static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
+
+
+static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_2bit)
+{
+ struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 i;
+
+ if (vddc_sclk_table && vddc_sclk_table->count) {
+ if (vid_2bit < vddc_sclk_table->count)
+ return vddc_sclk_table->entries[vid_2bit].v;
+ else
+ return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
+ } else {
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+ return vid_mapping_table->entries[i].vid_7bit;
+ }
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
+ }
+}
+
+static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit)
+{
+ struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 i;
+
+ if (vddc_sclk_table && vddc_sclk_table->count) {
+ for (i = 0; i < vddc_sclk_table->count; i++) {
+ if (vddc_sclk_table->entries[i].v == vid_7bit)
+ return i;
+ }
+ return vddc_sclk_table->count - 1;
+ } else {
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+ return vid_mapping_table->entries[i].vid_2bit;
+ }
+
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+ }
+}
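
Both converters above prefer the BIOS vddc_dependency_on_sclk table when it is populated, and every fallback path clamps to the last table entry instead of returning an error. A minimal standalone sketch of that lookup-with-clamp pattern, using a simplified stand-in for sumo_vid_mapping_table:

	/* Simplified stand-in for the driver's mapping entries. */
	struct vid_entry { u32 vid_2bit; u32 vid_7bit; };

	static u32 vid2_to_vid7(const struct vid_entry *tbl, u32 n, u32 vid_2bit)
	{
		u32 i;

		for (i = 0; i < n; i++)
			if (tbl[i].vid_2bit == vid_2bit)
				return tbl[i].vid_7bit;
		/* No exact match: clamp to the last entry, as the code above does. */
		return tbl[n - 1].vid_7bit;
	}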
+
+static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
+{
+/* This bit selects who handles display phy powergating.
+ * Clear the bit to let atom handle it.
+ * Set it to let the driver handle it.
+ * For now we just let atom handle it.
+ */
+#if 0
+ u32 v = RREG32(mmDOUT_SCRATCH3);
+
+ if (enable)
+ v |= 0x4;
+ else
+ v &= 0xFFFFFFFB;
+
+ WREG32(mmDOUT_SCRATCH3, v);
+#endif
+}
+
+static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
+ struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
+ ATOM_AVAILABLE_SCLK_LIST *table)
+{
+ u32 i;
+ u32 n = 0;
+ u32 prev_sclk = 0;
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ if (table[i].ulSupportedSCLK > prev_sclk) {
+ sclk_voltage_mapping_table->entries[n].sclk_frequency =
+ table[i].ulSupportedSCLK;
+ sclk_voltage_mapping_table->entries[n].vid_2bit =
+ table[i].usVoltageIndex;
+ prev_sclk = table[i].ulSupportedSCLK;
+ n++;
+ }
+ }
+
+ sclk_voltage_mapping_table->num_max_dpm_entries = n;
+}
+
+static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ ATOM_AVAILABLE_SCLK_LIST *table)
+{
+ u32 i, j;
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ if (table[i].ulSupportedSCLK != 0) {
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+ table[i].usVoltageID;
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+ table[i].usVoltageIndex;
+ }
+ }
+
+ for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
+ if (vid_mapping_table->entries[i].vid_7bit == 0) {
+ for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
+ if (vid_mapping_table->entries[j].vid_7bit != 0) {
+ vid_mapping_table->entries[i] =
+ vid_mapping_table->entries[j];
+ vid_mapping_table->entries[j].vid_7bit = 0;
+ break;
+ }
+ }
+
+ if (j == SUMO_MAX_NUMBER_VOLTAGES)
+ break;
+ }
+ }
+
+ vid_mapping_table->num_entries = i;
+}
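
The second loop above compacts holes out of the table: any slot whose vid_7bit is still zero is filled by pulling the next populated slot forward, and num_entries ends up at the first slot that stays empty. A hypothetical trace with four slots:

	/* Input  (vid_7bit per slot): { 0x40, 0x00, 0x48, 0x00 }
	 * After compaction:           { 0x40, 0x48, 0x00, 0x00 }
	 * vid_mapping_table->num_entries = 2
	 */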
+
+#if 0
+static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 2 },
+ { 4, 1, 1 },
+ { 5, 5, 2 },
+ { 6, 6, 1 },
+ { 7, 9, 2 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 1 },
+ { 4, 1, 1 },
+ { 5, 5, 1 },
+ { 6, 6, 1 },
+ { 7, 9, 1 },
+ { 8, 4, 1 },
+ { 9, 2, 1 },
+ { 10, 3, 1 },
+ { 11, 6, 1 },
+ { 12, 8, 2 },
+ { 13, 1, 1 },
+ { 14, 2, 1 },
+ { 15, 3, 1 },
+ { 16, 1, 1 },
+ { 17, 4, 1 },
+ { 18, 3, 1 },
+ { 19, 1, 1 },
+ { 20, 8, 1 },
+ { 21, 5, 1 },
+ { 22, 1, 1 },
+ { 23, 1, 1 },
+ { 24, 4, 1 },
+ { 27, 6, 1 },
+ { 28, 1, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
+{
+ { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
+{
+ { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
+{
+ { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
+{
+ { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
+{
+ { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
+{
+ { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+#endif
+
+static const struct kv_pt_config_reg didt_config_kv[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
+{
+ struct kv_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
+#if 0
+static void kv_program_local_cac_table(struct amdgpu_device *adev,
+ const struct kv_lcac_config_values *local_cac_table,
+ const struct kv_lcac_config_reg *local_cac_reg)
+{
+ u32 i, count, data;
+ const struct kv_lcac_config_values *values = local_cac_table;
+
+ while (values->block_id != 0xffffffff) {
+ count = values->signal_id;
+ for (i = 0; i < count; i++) {
+ data = ((values->block_id << local_cac_reg->block_shift) &
+ local_cac_reg->block_mask);
+ data |= ((i << local_cac_reg->signal_shift) &
+ local_cac_reg->signal_mask);
+ data |= ((values->t << local_cac_reg->t_shift) &
+ local_cac_reg->t_mask);
+ data |= ((1 << local_cac_reg->enable_shift) &
+ local_cac_reg->enable_mask);
+ WREG32_SMC(local_cac_reg->cntl, data);
+ }
+ values++;
+ }
+}
+#endif
+
+static int kv_program_pt_config_registers(struct amdgpu_device *adev,
+ const struct kv_pt_config_reg *cac_config_regs)
+{
+ const struct kv_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == KV_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+ cache = 0;
+
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset, data);
+ break;
+ }
+ }
+ config_regs++;
+ }
+
+ return 0;
+}
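
Rows tagged KV_CONFIGREG_CACHE only accumulate (value << shift) & mask into the local cache; the next non-cache row performs the real read-modify-write and ORs the cached bits in before the cache is cleared. The didt_config_kv table above uses no cache rows, so each entry is an independent read-modify-write. A hypothetical table illustrating the cache pattern:

	static const struct kv_pt_config_reg example_regs[] = {
		/* offset, mask, shift, value, type */
		{ 0x10, 0x000000ff,  0, 0x12, KV_CONFIGREG_CACHE },	/* cached only */
		{ 0x10, 0x0000ff00,  8, 0x34, KV_CONFIGREG_CACHE },	/* cached only */
		{ 0x10, 0x00ff0000, 16, 0x56, KV_CONFIGREG_DIDT_IND },	/* writes the 0x56 field plus cached 0x3412 */
		{ 0xFFFFFFFF }
	};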
+
+static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(ixDIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(ixDIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
+ }
+}
+
+static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ if (pi->caps_sq_ramping ||
+ pi->caps_db_ramping ||
+ pi->caps_td_ramping ||
+ pi->caps_tcp_ramping) {
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ if (enable) {
+ ret = kv_program_pt_config_registers(adev, didt_config_kv);
+ if (ret) {
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ return ret;
+ }
+ }
+
+ kv_do_enable_didt(adev, enable);
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ return 0;
+}
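
The DIDT (di/dt throttling) registers are only touched with the RLC parked in safe mode: kv_enable_didt brackets both the table programming and the per-block enable toggles with the enter/exit pair, and the early-return path on a programming failure exits safe mode first as well. The skeleton of the pattern:

	amdgpu_gfx_rlc_enter_safe_mode(adev);
	/* reprogram didt_config_kv, toggle the DIDT_*_CTRL0 enable bits */
	amdgpu_gfx_rlc_exit_safe_mode(adev);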
+
+#if 0
+static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ if (pi->caps_cac) {
+ WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
+ }
+}
+#endif
+
+static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
+ if (ret)
+ pi->cac_enabled = false;
+ else
+ pi->cac_enabled = true;
+ } else if (pi->cac_enabled) {
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int kv_process_firmware_header(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->dpm_table_start = tmp;
+
+ ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->soft_regs_start = tmp;
+
+ return ret;
+}
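
dpm_table_start and soft_regs_start cache offsets read from the SMU7 firmware header in SMC SRAM; the helpers that follow patch individual fields of the in-SRAM DPM table by adding offsetof() into SMU7_Fusion_DpmTable. The recurring address arithmetic, in sketch form (GraphicsVoltageChangeEnable is one real field used below):

	u32 addr = pi->dpm_table_start +
		   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable);
	/* amdgpu_kv_copy_bytes_to_smc(adev, addr, &val, sizeof(u8), pi->sram_end); */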
+
+static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ pi->graphics_voltage_change_enable = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
+ &pi->graphics_voltage_change_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_interval(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ pi->graphics_interval = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
+ &pi->graphics_interval,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
+ &pi->graphics_boot_level,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static void kv_program_vc(struct amdgpu_device *adev)
+{
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
+}
+
+static void kv_clear_vc(struct amdgpu_device *adev)
+{
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
+}
+
+static int kv_set_divider_value(struct amdgpu_device *adev,
+ u32 index, u32 sclk)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct atom_clock_dividers dividers;
+ int ret;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk, false, &dividers);
+ if (ret)
+ return ret;
+
+ pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
+ pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
+
+ return 0;
+}
+
+static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
+ u16 voltage)
+{
+ return 6200 - (voltage * 25);
+}
+
+static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
+ u32 vid_2bit)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
+ &pi->sys_info.vid_mapping_table,
+ vid_2bit);
+
+ return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
+}
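
Voltage decoding is thus a two-step chain: the 2-bit code is widened to a 7/8-bit VID through the mapping table, then converted linearly at 25 units per step below a 6200 ceiling (the unit here is the driver's internal voltage scale). A worked example, assuming the mapping yields VID 64:

	kv_convert_8bit_index_to_voltage(adev, 0);	/* 6200 */
	kv_convert_8bit_index_to_voltage(adev, 64);	/* 6200 - 64 * 25 = 4600 */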
+
+
+static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
+ pi->graphics_level[index].MinVddNb =
+ cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));
+
+ return 0;
+}
+
+static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->graphics_level[index].AT = cpu_to_be16((u16)at);
+
+ return 0;
+}
+
+static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
+}
+
+static void kv_start_dpm(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+ tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+
+ amdgpu_kv_smc_dpm_enable(adev, true);
+}
+
+static void kv_stop_dpm(struct amdgpu_device *adev)
+{
+ amdgpu_kv_smc_dpm_enable(adev, false);
+}
+
+static void kv_start_am(struct amdgpu_device *adev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
+ SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
+ sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
+
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static void kv_reset_am(struct amdgpu_device *adev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
+ SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
+
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
+{
+ return amdgpu_kv_notify_message_to_smu(adev, freeze ?
+ PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+}
+
+static int kv_force_lowest_valid(struct amdgpu_device *adev)
+{
+ return kv_force_dpm_lowest(adev);
+}
+
+static int kv_unforce_levels(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
+ return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
+ else
+ return kv_set_enabled_levels(adev);
+}
+
+static int kv_update_sclk_t(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 low_sclk_interrupt_t = 0;
+ int ret = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+ }
+ return ret;
+}
+
+static int kv_program_bootup_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk == pi->boot_pl.sclk)
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(adev, i, true);
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ if (table->num_max_dpm_entries == 0)
+ return -EINVAL;
+
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(adev, i, true);
+ }
+ return 0;
+}
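
Both branches scan from the highest graphics DPM level downward for an entry whose clock matches the bootup power level's sclk; because the loops stop at i > 0, level 0 is the implicit fallback when nothing matches. A hypothetical trace:

	/* count = 4, clk = { 200, 300, 400, 600 }, boot_pl.sclk = 400
	 * i = 3: 600 != 400
	 * i = 2: 400 == 400 -> graphics_boot_level = 2, level 2 enabled
	 */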
+
+static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ pi->graphics_therm_throttle_enable = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
+ &pi->graphics_therm_throttle_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_upload_dpm_settings(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
+ (u8 *)&pi->graphics_level,
+ sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
+ pi->sram_end);
+
+ if (ret)
+ return ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),