-rw-r--r--  .mailmap | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_10.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 75
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 1
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 166
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c | 4
-rw-r--r--  drivers/gpu/drm/tiny/cirrus.c | 2
37 files changed, 280 insertions(+), 191 deletions(-)
diff --git a/.mailmap b/.mailmap
index 66754ca6656d..424564f40733 100644
--- a/.mailmap
+++ b/.mailmap
@@ -136,6 +136,9 @@ Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> <ezequiel@collabora.com>
+Faith Ekstrand <faith.ekstrand@collabora.com> <jason@jlekstrand.net>
+Faith Ekstrand <faith.ekstrand@collabora.com> <jason.ekstrand@intel.com>
+Faith Ekstrand <faith.ekstrand@collabora.com> <jason.ekstrand@collabora.com>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index b719852daa07..1a3cb53d2e0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -543,6 +543,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
struct harvest_table *harvest_info;
u16 offset;
int i;
+ uint32_t umc_harvest_config = 0;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
@@ -570,12 +571,17 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
break;
case UMC_HWID:
+ umc_harvest_config |=
+ 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
(*umc_harvest_count)++;
break;
default:
break;
}
}
+
+ adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ ~umc_harvest_config;
}
/* ================================================== */
@@ -1156,8 +1162,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
AMDGPU_MAX_SDMA_INSTANCES);
}
- if (le16_to_cpu(ip->hw_id) == UMC_HWID)
+ if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
adev->gmc.num_umc++;
+ adev->umc.node_inst_num++;
+ }
for (k = 0; k < num_base_address; k++) {
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e3e1ed4314dd..6c7d672412b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1315,7 +1315,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
- adev->in_suspend || adev->shutdown)
+ adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
return;
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 28fe6d941054..3f5d13035aff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -602,27 +602,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
- int index, idx;
+ int index;
int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
- bool dev_entered;
if (psp->adev->no_hw_access)
return 0;
- dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx);
- /*
- * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring
- * a lock in drm_dev_enter during driver unload because we must call
- * drm_dev_unplug as the beginning of unload driver sequence . It is very
- * crucial that userspace can't access device instances anymore.
- */
- if (!dev_entered)
- WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD &&
- psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA &&
- psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD);
-
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
@@ -686,8 +673,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
}
exit:
- if (dev_entered)
- drm_dev_exit(idx);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index f2bf979af588..36e19336f3b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -42,7 +42,7 @@
#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
#define LOOP_UMC_NODE_INST(node_inst) \
- for ((node_inst) = 0; (node_inst) < adev->umc.node_inst_num; (node_inst)++)
+ for_each_set_bit((node_inst), &(adev->umc.active_mask), adev->umc.node_inst_num)
#define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
@@ -69,7 +69,7 @@ struct amdgpu_umc {
/* number of umc instance with memory map register access */
uint32_t umc_inst_num;
- /*number of umc node instance with memory map register access*/
+ /* Total number of umc node instance including harvest one */
uint32_t node_inst_num;
/* UMC regiser per channel offset */
@@ -82,6 +82,9 @@ struct amdgpu_umc {
const struct amdgpu_umc_funcs *funcs;
struct amdgpu_umc_ras *ras;
+
+ /* active mask for umc node instance */
+ unsigned long active_mask;
};
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 85e0afc3d4f7..af7b3ba1ca00 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -567,7 +567,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
case IP_VERSION(8, 10, 0):
adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
- adev->umc.node_inst_num = adev->gmc.num_umc;
adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 4b0d563c6522..4ef1fa4603c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -382,11 +382,6 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
break;
- case IP_VERSION(7, 5, 1):
- data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
- data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
- WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
- fallthrough;
default:
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
@@ -399,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
break;
}
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 1):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+ data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
+ break;
+ }
+
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index d972025f0d20..855d390c41de 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -444,9 +444,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
en = &nv_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
- + en->reg_offset))
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = nv_get_register_value(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 7cd17dda32ce..2eddd7f6cd41 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
en = &soc15_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 620f7409825d..061793d390cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -111,6 +111,7 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 2):
+ case IP_VERSION(4, 0, 4):
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
if (encode)
*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
@@ -291,9 +292,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
en = &soc21_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
- + en->reg_offset))
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = soc21_get_register_value(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
index 25eaf4af5fcf..c6dfd433fec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
@@ -31,9 +31,9 @@
/* number of umc instance with memory map register access */
#define UMC_V8_10_UMC_INSTANCE_NUM 2
-/* Total channel instances for all umc nodes */
+/* Total channel instances for all available umc nodes */
#define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \
- (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->umc.node_inst_num)
+ (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc)
/* UMC regiser per channel offset */
#define UMC_V8_10_PER_CHANNEL_OFFSET 0x400
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index cbef2e147da5..38c9e1ca6691 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
if (!pdd->doorbell_index) {
int r = kfd_alloc_process_doorbells(pdd->dev,
&pdd->doorbell_index);
- if (r)
+ if (r < 0)
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 24715ca2fa94..01383aac6b41 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -529,6 +529,19 @@ static struct clk_bw_params vg_bw_params = {
};
+static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
+{
+ uint32_t max = 0;
+ int i;
+
+ for (i = 0; i < num_clocks; ++i) {
+ if (clocks[i] > max)
+ max = clocks[i];
+ }
+
+ return max;
+}
+
static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
unsigned int voltage)
{
@@ -572,12 +585,16 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.num_entries = j + 1;
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+ for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
}
+ bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 923a9fb3c887..27448ffe60a4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -46,6 +46,7 @@
#include "asic_reg/mp/mp_13_0_0_sh_mask.h"
#include "smu_cmn.h"
#include "amdgpu_ras.h"
+#include "umc_v8_10.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -90,6 +91,12 @@
#define DEBUGSMC_MSG_Mode1Reset 2
+/*
+ * SMU_v13_0_10 supports ECCTABLE since version 80.34.0,
+ * use this to check ECCTABLE feature whether support
+ */
+#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200
+
static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -229,6 +236,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(ACTIVITY_MONITOR_COEFF),
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ECCINFO),
};
static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -462,6 +470,8 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -477,8 +487,14 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ goto err3_out;
+
return 0;
+err3_out:
+ kfree(smu_table->watermarks_table);
err2_out:
kfree(smu_table->gpu_metrics_table);
err1_out:
@@ -2036,6 +2052,64 @@ static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return -EOPNOTSUPP;
+
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&
+ (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
+ return ret;
+ else
+ return -EOPNOTSUPP;
+}
+
+static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ EccInfoTable_t *ecc_table = NULL;
+ struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+ int i, ret = 0;
+ struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+ ret = smu_v13_0_0_check_ecc_table_support(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ECCINFO,
+ 0,
+ smu_table->ecc_table,
+ false);
+ if (ret) {
+ dev_info(adev->dev, "Failed to export SMU ecc table!\n");
+ return ret;
+ }
+
+ ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+ for (i = 0; i < UMC_V8_10_TOTAL_CHANNEL_NUM(adev); i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -2111,6 +2185,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
.gpo_control = smu_v13_0_gpo_control,
+ .get_ecc_info = smu_v13_0_0_get_ecc_info,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 0264abe55278..faf5e9efa7d3 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
/* Sink EOTF is Bit map while infoframe is absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
- connector->hdr_sink_metadata.hdmi_type1.eotf)) {
- DRM_DEBUG_KMS("EOTF Not Supported\n");
- return -EINVAL;
- }
+ connector->hdr_sink_metadata.hdmi_type1.eotf))
+ DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
err = hdmi_drm_infoframe_init(frame);
if (err < 0)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 5457c02ca1ab..fed41800fea7 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
+ drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
if (state->writeback_job && state->writeback_job->fb)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 871870ddf7ec..949b18a29a55 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -23,7 +23,6 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d09221f97f71..a1e006ec5dce 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -151,8 +151,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, 1);
/* Enable local preemption for finegrain preemption */
- OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
- OUT_RING(ring, 0x02);
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x1);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@@ -806,7 +806,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
/* Set the highest bank bit */
- if (adreno_is_a540(adreno_gpu))
+ if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
regbit = 2;
else
regbit = 1;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 7658e89844b4..f58dd564d122 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -63,7 +63,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
struct msm_ringbuffer *ring = gpu->rb[i];
spin_lock_irqsave(&ring->preempt_lock, flags);
- empty = (get_wptr(ring) == ring->memptrs->rptr);
+ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty)
@@ -207,6 +207,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
+ a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
}
/* Write a 0 to signal that we aren't switching pagetables */
@@ -257,7 +258,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
- ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
ptr->counter = counters_iova;
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index f3c9600221d4..7f5bc73b2040 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -974,7 +974,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
int status, ret;
if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
- return 0;
+ return -EINVAL;
gmu->hung = false;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index aae60cbd9164..6faea5049f76 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1746,7 +1746,9 @@ static void a6xx_destroy(struct msm_gpu *gpu)
a6xx_llc_slices_destroy(a6xx_gpu);
+ mutex_lock(&a6xx_gpu->gmu.lock);
a6xx_gmu_remove(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
adreno_gpu_cleanup(adreno_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 36f062c7582f..c5c4c93b3689 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -558,7 +558,8 @@ static void adreno_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_gpu *gpu = dev_to_gpu(dev);
- WARN_ON_ONCE(adreno_system_suspend(dev));
+ if (pm_runtime_enabled(dev))
+ WARN_ON_ONCE(adreno_system_suspend(dev));
gpu->funcs->destroy(gpu);
priv->gpu_pdev = NULL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index cf053e8f081e..497c9e1673ab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -12,11 +12,15 @@
#include "dpu_hw_catalog.h"
#include "dpu_kms.h"
-#define VIG_MASK \
+#define VIG_BASE_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
- BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_CDP) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+#define VIG_MASK \
+ (VIG_BASE_MASK | \
+ BIT(DPU_SSPP_CSC_10BIT))
+
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
@@ -26,10 +30,7 @@
#define VIG_SC7180_MASK \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
-#define VIG_SM8250_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
-
-#define VIG_QCM2290_MASK (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL))
+#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
#define DMA_MSM8998_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
@@ -51,7 +52,7 @@
(DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
#define MIXER_MSM8998_MASK \
- (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+ (BIT(DPU_MIXER_SOURCESPLIT))
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
@@ -314,10 +315,9 @@ static const struct dpu_caps msm8998_dpu_caps = {
};
static const struct dpu_caps qcm2290_dpu_caps = {
- .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
- .ubwc_version = DPU_HW_UBWC_VER_20,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
@@ -353,9 +353,9 @@ static const struct dpu_caps sc7180_dpu_caps =