Merge tag 'amd-drm-fixes-6.3-2023-04-12' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes
amd-drm-fixes-6.3-2023-04-12:

amdgpu:
- SMU13 fixes
- DP MST fix

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230412215637.7881-1-alexander.deucher@amd.com
@@ -177,6 +177,40 @@ void dm_helpers_dp_update_branch_info(
 		const struct dc_link *link)
 {}
 
+static void dm_helpers_construct_old_payload(
+			struct dc_link *link,
+			int pbn_per_slot,
+			struct drm_dp_mst_atomic_payload *new_payload,
+			struct drm_dp_mst_atomic_payload *old_payload)
+{
+	struct link_mst_stream_allocation_table current_link_table =
+					link->mst_stream_alloc_table;
+	struct link_mst_stream_allocation *dc_alloc;
+	int i;
+
+	*old_payload = *new_payload;
+
+	/* Set correct time_slots/PBN of old payload.
+	 * other fields (delete & dsc_enabled) in
+	 * struct drm_dp_mst_atomic_payload are don't care fields
+	 * while calling drm_dp_remove_payload()
+	 */
+	for (i = 0; i < current_link_table.stream_count; i++) {
+		dc_alloc =
+			&current_link_table.stream_allocations[i];
+
+		if (dc_alloc->vcp_id == new_payload->vcpi) {
+			old_payload->time_slots = dc_alloc->slot_count;
+			old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+			break;
+		}
+	}
+
+	/* make sure there is an old payload*/
+	ASSERT(i != current_link_table.stream_count);
+
+}
+
 /*
  * Writes payload allocation table in immediate downstream device.
  */
@@ -188,7 +222,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_dp_mst_topology_state *mst_state;
-	struct drm_dp_mst_atomic_payload *payload;
+	struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
 	struct drm_dp_mst_topology_mgr *mst_mgr;
 
 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -204,17 +238,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
 
 	/* It's OK for this to fail */
-	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
-	if (enable)
-		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
-	else
-		drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+	if (enable) {
+		target_payload = new_payload;
+
+		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+	} else {
+		/* construct old payload by VCPI*/
+		dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+						 new_payload, &old_payload);
+		target_payload = &old_payload;
+
+		drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+	}
 
 	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
 	 * stream. AMD ASIC stream slot allocation should follow the same
 	 * sequence. copy DRM MST allocation to dc */
-	fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+	fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
 
 	return true;
 }
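Why the extra helper: once the atomic state has dropped the stream, new_payload no longer carries the allocation that has to be torn down, so the pre-removal slot count and PBN are rebuilt from DC's own mst_stream_alloc_table before drm_dp_remove_payload() is called. A minimal sketch of the values involved, assuming a hypothetical 4-slot allocation and a pbn_per_slot of 60 (illustration only, not part of the patch):

	struct drm_dp_mst_atomic_payload old_payload;

	/* hypothetical: DC recorded slot_count == 4 for this VCPI, pbn_per_slot == 60 */
	dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
					 new_payload, &old_payload);
	/* old_payload.time_slots == 4, old_payload.pbn == 4 * 60 == 240 */
	drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);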
@@ -61,6 +61,12 @@
 #define CTF_OFFSET_HOTSPOT		5
 #define CTF_OFFSET_MEM			5
 
+static const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
+static const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+
+#define DECODE_GEN_SPEED(gen_speed_idx)		(pmfw_decoded_link_speed[gen_speed_idx])
+#define DECODE_LANE_WIDTH(lane_width_idx)	(pmfw_decoded_link_width[lane_width_idx])
+
 struct smu_13_0_max_sustainable_clocks {
 	uint32_t display_clock;
 	uint32_t phy_clock;
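The PMFW reports PCIe gen speed and lane width as table indices rather than literal values; the lookup arrays above translate an index into the value the sysfs output should compare against. A small illustration, with the results taken directly from the arrays above:

	int gen   = DECODE_GEN_SPEED(3);   /* pmfw_decoded_link_speed[3] == 4, i.e. Gen4  */
	int lanes = DECODE_LANE_WIDTH(6);  /* pmfw_decoded_link_width[6] == 16, i.e. x16 */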
@@ -1144,8 +1144,8 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
 					(pcie_table->pcie_lane[i] == 5) ? "x12" :
 					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
 					pcie_table->clk_freq[i],
-					((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
-					(lane_width == link_width[pcie_table->pcie_lane[i]]) ?
+					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+					(lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
 					"*" : "");
 		break;
@@ -575,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
 						     dpm_table);
 		if (ret)
 			return ret;
+
+		if (skutable->DriverReportedClocks.GameClockAc &&
+		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
+		    skutable->DriverReportedClocks.GameClockAc)) {
+			dpm_table->dpm_levels[dpm_table->count - 1].value =
+				skutable->DriverReportedClocks.GameClockAc;
+			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+		}
 	} else {
 		dpm_table->count = 1;
 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
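The added block keeps the top gfxclk DPM level from exceeding the AC game clock the firmware advertises. A worked illustration with hypothetical values (not from the patch):

	/* Hypothetical values in MHz:
	 * dpm_levels[count - 1].value = 2600, DriverReportedClocks.GameClockAc = 2505
	 * -> both the top level value and dpm_table->max are clamped to 2505.
	 * If GameClockAc were 0 or >= the top level, nothing would change.
	 */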
@@ -828,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
 	return ret;
 }
 
+static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
+					     enum smu_clk_type clk_type,
+					     uint32_t *min,
+					     uint32_t *max)
+{
+	struct smu_13_0_dpm_context *dpm_context =
+		smu->smu_dpm.dpm_context;
+	struct smu_13_0_dpm_table *dpm_table;
+
+	switch (clk_type) {
+	case SMU_MCLK:
+	case SMU_UCLK:
+		/* uclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.uclk_table;
+		break;
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		/* gfxclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.gfx_table;
+		break;
+	case SMU_SOCCLK:
+		/* socclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.soc_table;
+		break;
+	case SMU_FCLK:
+		/* fclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.fclk_table;
+		break;
+	case SMU_VCLK:
+	case SMU_VCLK1:
+		/* vclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.vclk_table;
+		break;
+	case SMU_DCLK:
+	case SMU_DCLK1:
+		/* dclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.dclk_table;
+		break;
+	default:
+		dev_err(smu->adev->dev, "Unsupported clock type!\n");
+		return -EINVAL;
+	}
+
+	if (min)
+		*min = dpm_table->min;
+	if (max)
+		*max = dpm_table->max;
+
+	return 0;
+}
+
 static int smu_v13_0_7_read_sensor(struct smu_context *smu,
 				   enum amd_pp_sensors sensor,
 				   void *data,
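The new per-ASIC query simply returns the soft limits already stored in the driver's DPM tables; either output pointer may be NULL when only one bound is wanted. A minimal usage sketch with a hypothetical caller (in practice the function is reached through the .get_dpm_ultimate_freq hook swapped in at the end of this diff):

	uint32_t gfx_min, gfx_max, uclk_max;
	int ret;

	ret = smu_v13_0_7_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &gfx_min, &gfx_max);
	if (ret)
		return ret;

	/* min pointer left NULL: only the upper bound is filled in */
	ret = smu_v13_0_7_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &uclk_max);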
@@ -1074,8 +1133,8 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
 					(pcie_table->pcie_lane[i] == 5) ? "x12" :
 					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
 					pcie_table->clk_freq[i],
-					(gen_speed == pcie_table->pcie_gen[i]) &&
-					(lane_width == pcie_table->pcie_lane[i]) ?
+					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
 					"*" : "");
 		break;
@@ -1329,9 +1388,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
 		&dpm_context->dpm_tables.fclk_table;
 	struct smu_umd_pstate_table *pstate_table =
 		&smu->pstate_table;
+	struct smu_table_context *table_context = &smu->smu_table;
+	PPTable_t *pptable = table_context->driver_pptable;
+	DriverReportedClocks_t driver_clocks =
+		pptable->SkuTable.DriverReportedClocks;
 
 	pstate_table->gfxclk_pstate.min = gfx_table->min;
-	pstate_table->gfxclk_pstate.peak = gfx_table->max;
+	if (driver_clocks.GameClockAc &&
+		(driver_clocks.GameClockAc < gfx_table->max))
+		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+	else
+		pstate_table->gfxclk_pstate.peak = gfx_table->max;
 
 	pstate_table->uclk_pstate.min = mem_table->min;
 	pstate_table->uclk_pstate.peak = mem_table->max;
@@ -1348,12 +1415,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
 	pstate_table->fclk_pstate.min = fclk_table->min;
 	pstate_table->fclk_pstate.peak = fclk_table->max;
 
-	/*
-	 * For now, just use the mininum clock frequency.
-	 * TODO: update them when the real pstate settings available
-	 */
-	pstate_table->gfxclk_pstate.standard = gfx_table->min;
-	pstate_table->uclk_pstate.standard = mem_table->min;
+	if (driver_clocks.BaseClockAc &&
+		driver_clocks.BaseClockAc < gfx_table->max)
+		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
+	else
+		pstate_table->gfxclk_pstate.standard = gfx_table->max;
+	pstate_table->uclk_pstate.standard = mem_table->max;
 	pstate_table->socclk_pstate.standard = soc_table->min;
 	pstate_table->vclk_pstate.standard = vclk_table->min;
 	pstate_table->dclk_pstate.standard = dclk_table->min;
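Taken together with the hunk above, the peak and standard gfxclk pstates now prefer the firmware-reported AC clocks whenever those are non-zero and below the table maximum. A small illustration with hypothetical numbers:

	/* Illustration only, hypothetical values in MHz:
	 * gfx_table->max = 2600, GameClockAc = 2505, BaseClockAc = 2255
	 * -> gfxclk_pstate.peak     = 2505  (GameClockAc is set and below max)
	 * -> gfxclk_pstate.standard = 2255  (BaseClockAc is set and below max)
	 * If either reported clock were 0, the table max would be used instead.
	 */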
@@ -1676,7 +1743,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
 	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
 	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
 	.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
-	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+	.get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
 	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
 	.read_sensor = smu_v13_0_7_read_sensor,
 	.feature_is_enabled = smu_cmn_feature_is_enabled,