Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
50 files changed, 437 insertions, 192 deletions
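For orientation, a minimal sketch (not part of the commit) of the new active-stream bookkeeping introduced by the dc.c and dc_dmub_srv.c hunks below: each pipe carrying a stream contributes one bit to a mask, and DMCUB is notified over GPINT only when that mask changes. The helper name example_notify_active_streams is hypothetical; the types and called functions come from the hunks below.

static void example_notify_active_streams(struct dc *dc, struct dc_state *context)
{
	uint8_t mask = 0;
	int i;

	/* Bit i is set when pipe i drives a stream, mirroring get_stream_mask() below */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			mask |= 1 << i;
	}

	/* e.g. pipes 0 and 2 active -> mask == 0x5 */
	context->stream_mask = mask;

	/* Only notify DMCUB when the set of active pipes actually changed */
	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
}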
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 2d5c7daaee23..29d64e7e304f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -847,6 +847,73 @@ static enum bp_result bios_parser_get_spread_spectrum_info( return result; } +static enum bp_result get_soc_bb_info_v4_4( + struct bios_parser *bp, + struct bp_soc_bb_info *soc_bb_info) +{ + enum bp_result result = BP_RESULT_OK; + struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL; + + if (!soc_bb_info) + return BP_RESULT_BADINPUT; + + if (!DATA_TABLES(dce_info)) + return BP_RESULT_BADBIOSTABLE; + + if (!DATA_TABLES(smu_info)) + return BP_RESULT_BADBIOSTABLE; + + disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_4, + DATA_TABLES(dce_info)); + if (!disp_cntl_tbl) + return BP_RESULT_BADBIOSTABLE; + + soc_bb_info->dram_clock_change_latency_100ns = disp_cntl_tbl->max_mclk_chg_lat; + soc_bb_info->dram_sr_enter_exit_latency_100ns = disp_cntl_tbl->max_sr_enter_exit_lat; + soc_bb_info->dram_sr_exit_latency_100ns = disp_cntl_tbl->max_sr_exit_lat; + + return result; +} + +static enum bp_result bios_parser_get_soc_bb_info( + struct dc_bios *dcb, + struct bp_soc_bb_info *soc_bb_info) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + enum bp_result result = BP_RESULT_UNSUPPORTED; + struct atom_common_table_header *header; + struct atom_data_revision tbl_revision; + + if (!soc_bb_info) /* check for bad input */ + return BP_RESULT_BADINPUT; + + if (!DATA_TABLES(dce_info)) + return BP_RESULT_UNSUPPORTED; + + header = GET_IMAGE(struct atom_common_table_header, + DATA_TABLES(dce_info)); + get_atom_data_table_revision(header, &tbl_revision); + + switch (tbl_revision.major) { + case 4: + switch (tbl_revision.minor) { + case 1: + case 2: + case 3: + break; + case 4: + result = get_soc_bb_info_v4_4(bp, soc_bb_info); + default: + break; + } + break; + default: + break; + } + + return result; +} + static enum bp_result get_embedded_panel_info_v2_1( struct bios_parser *bp, struct embedded_panel_info *info) @@ -2222,7 +2289,9 @@ static const struct dc_vbios_funcs vbios_funcs = { .get_atom_dc_golden_table = bios_get_atom_dc_golden_table, - .enable_lvtma_control = bios_parser_enable_lvtma_control + .enable_lvtma_control = bios_parser_enable_lvtma_control, + + .get_soc_bb_info = bios_parser_get_soc_bb_info, }; static bool bios_parser2_construct( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c index c11c6b3a787d..0267644717b2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c @@ -80,7 +80,7 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = { /* ClocksStatePerformance */ { .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; -int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) +static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); int dprefclk_wdivider; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 543afa34d87a..136ae6d70c80 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -761,6 +761,7 @@ void rn_clk_mgr_construct( { struct 
dc_debug_options *debug = &ctx->dc->debug; struct dpm_clocks clock_table = { 0 }; + enum pp_smu_status status = 0; clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn21_funcs; @@ -818,8 +819,10 @@ void rn_clk_mgr_construct( clk_mgr->base.bw_params = &rn_bw_params; if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) { - pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); - if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); + + if (status == PP_SMU_RESULT_OK && + ctx->dc_bios && ctx->dc_bios->integrated_info) { rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index dc463d99ef50..83ce55edb3aa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1246,6 +1246,19 @@ void dc_trigger_sync(struct dc *dc, struct dc_state *context) } } +static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) +{ + int i; + unsigned int stream_mask = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + stream_mask |= 1 << i; + } + + return stream_mask; +} + /* * Applies given context to HW and copy it into current context. * It's up to the user to release the src context afterwards. @@ -1273,7 +1286,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->optimize_seamless_boot_streams++; } - if (dc->optimize_seamless_boot_streams == 0) + if (context->stream_count > dc->optimize_seamless_boot_streams) dc->hwss.prepare_bandwidth(dc, context); disable_dangling_plane(dc, context); @@ -1355,13 +1368,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); - if (dc->optimize_seamless_boot_streams == 0) { + if (context->stream_count > dc->optimize_seamless_boot_streams) { /* Must wait for no flips to be pending before doing optimize bw */ wait_for_no_pipes_pending(dc, context); /* pplib is notified if disp_num changed */ dc->hwss.optimize_bandwidth(dc, context); } + context->stream_mask = get_stream_mask(dc, context); + + if (context->stream_mask != dc->current_state->stream_mask) + dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); + for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; @@ -1481,13 +1499,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) return true; } -struct dc_state *dc_create_state(struct dc *dc) +static void init_state(struct dc *dc, struct dc_state *context) { - struct dc_state *context = kvzalloc(sizeof(struct dc_state), - GFP_KERNEL); - - if (!context) - return NULL; /* Each context must have their own instance of VBA and in order to * initialize and obtain IP and SOC the base DML instance from DC is * initially copied into every context @@ -1495,6 +1508,17 @@ struct dc_state *dc_create_state(struct dc *dc) #ifdef CONFIG_DRM_AMD_DC_DCN memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); #endif +} + +struct dc_state *dc_create_state(struct dc *dc) +{ + struct dc_state *context = kzalloc(sizeof(struct dc_state), + GFP_KERNEL); + + if (!context) + return NULL; + + init_state(dc, context); kref_init(&context->refcount); @@ -2415,8 +2439,7 @@ static void commit_planes_for_stream(struct dc *dc, 
plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && dc->hwss.program_triplebuffer != NULL && - !plane_state->flip_immediate && - !dc->debug.disable_tri_buf) { + !plane_state->flip_immediate && dc->debug.enable_tri_buf) { /*triple buffer for VUpdate only*/ plane_state->triplebuffer_flips = true; } @@ -2443,8 +2466,7 @@ static void commit_planes_for_stream(struct dc *dc, ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); - if (dc->hwss.program_triplebuffer != NULL && - !dc->debug.disable_tri_buf) { + if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); @@ -2509,8 +2531,7 @@ static void commit_planes_for_stream(struct dc *dc, if (pipe_ctx->plane_state != plane_state) continue; /*program triple buffer after lock based on flip type*/ - if (dc->hwss.program_triplebuffer != NULL && - !dc->debug.disable_tri_buf) { + if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { /*only enable triplebuffer for fast_update*/ dc->hwss.program_triplebuffer( dc, pipe_ctx, plane_state->triplebuffer_flips); @@ -2965,7 +2986,7 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable) if (enable && !link->psr_settings.psr_allow_active) return dc_link_set_psr_allow_active(link, true, false); else if (!enable && link->psr_settings.psr_allow_active) - return dc_link_set_psr_allow_active(link, false, false); + return dc_link_set_psr_allow_active(link, false, true); } } @@ -3018,4 +3039,10 @@ void dc_lock_memory_clock_frequency(struct dc *dc) if (dc->current_state->res_ctx.pipe_ctx[i].plane_state) core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); } + +bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc, + struct dc_plane_state *plane) +{ + return false; +} #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 437d1a7a16fe..1871ff6119ae 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2946,7 +2946,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) pbn = get_pbn_from_timing(pipe_ctx); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - stream_encoder->funcs->set_mst_bandwidth( + stream_encoder->funcs->set_throttled_vcp_size( stream_encoder, avg_time_slots_per_mtp); @@ -2974,7 +2974,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) */ /* slot X.Y */ - stream_encoder->funcs->set_mst_bandwidth( + stream_encoder->funcs->set_throttled_vcp_size( stream_encoder, avg_time_slots_per_mtp); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index d1d95d3e248a..b9b66db8332b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -49,14 +49,14 @@ static struct dc_link_settings get_common_supported_link_settings( struct dc_link_settings link_setting_a, struct dc_link_settings link_setting_b); -static uint32_t get_training_aux_rd_interval( +static uint32_t get_eq_training_aux_rd_interval( struct dc_link *link, - uint32_t default_wait_in_micro_secs) + const struct dc_link_settings *link_settings) { union training_aux_rd_interval training_rd_interval; + uint32_t wait_in_micro_secs = 400; memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - /* overwrite the delay if rev > 
1.1*/ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { /* DP 1.2 or later - retrieve delay through @@ -68,10 +68,10 @@ static uint32_t get_training_aux_rd_interval( sizeof(training_rd_interval)); if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - default_wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; } - return default_wait_in_micro_secs; + return wait_in_micro_secs; } static void wait_for_training_aux_rd_interval( @@ -101,7 +101,16 @@ static void dpcd_set_training_pattern( dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } -static enum dc_dp_training_pattern get_supported_tp(struct dc_link *link) +static enum dc_dp_training_pattern decide_cr_training_pattern( + const struct dc_link_settings *link_settings) +{ + enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_1; + + return pattern; +} + +static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, + const struct dc_link_settings *link_settings) { enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2; struct encoder_feature_support *features = &link->link_enc->features; @@ -132,7 +141,6 @@ static void dpcd_set_link_settings( union down_spread_ctrl downspread = { {0} }; union lane_count_set lane_count_set = { {0} }; - enum dc_dp_training_pattern dp_tr_pattern; downspread.raw = (uint8_t) (lt_settings->link_settings.link_spread); @@ -143,9 +151,8 @@ static void dpcd_set_link_settings( lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - dp_tr_pattern = get_supported_tp(link); - if (dp_tr_pattern != DP_TRAINING_PATTERN_SEQUENCE_4) { + if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; } @@ -373,34 +380,30 @@ static void dpcd_set_lt_pattern_and_lane_settings( static bool is_cr_done(enum dc_lane_count ln_count, union lane_status *dpcd_lane_status) { - bool done = true; uint32_t lane; /*LANEx_CR_DONE bits All 1's?*/ for (lane = 0; lane < (uint32_t)(ln_count); lane++) { if (!dpcd_lane_status[lane].bits.CR_DONE_0) - done = false; + return false; } - return done; - + return true; } static bool is_ch_eq_done(enum dc_lane_count ln_count, union lane_status *dpcd_lane_status, union lane_align_status_updated *lane_status_updated) { - bool done = true; uint32_t lane; if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE) - done = false; + return false; else { for (lane = 0; lane < (uint32_t)(ln_count); lane++) { if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 || !dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) - done = false; + return false; } } - return done; - + return true; } static void update_drive_settings( @@ -979,7 +982,7 @@ static void start_clock_recovery_pattern_early(struct dc_link *link, { DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. 
Wait 400us.\n", __func__); - dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset); + dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset); dp_set_hw_lane_settings(link, lt_settings, offset); udelay(400); } @@ -994,7 +997,6 @@ static enum link_training_result perform_clock_recovery_sequence( uint32_t wait_time_microsec; struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; @@ -1002,7 +1004,7 @@ static enum link_training_result perform_clock_recovery_sequence( retry_count = 0; if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) - dp_set_hw_training_pattern(link, tr_pattern, offset); + dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset); /* najeeb - The synaptics MST hub can put the LT in * infinite loop by switching the VS @@ -1029,7 +1031,7 @@ static enum link_training_result perform_clock_recovery_sequence( dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, - tr_pattern, + lt_settings->pattern_for_cr, offset); else dpcd_set_lane_settings( @@ -1113,7 +1115,7 @@ static inline enum link_training_result perform_link_training_int( * TPS4 must be used instead of POST_LT_ADJ_REQ. */ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || - get_supported_tp(link) == DP_TRAINING_PATTERN_SEQUENCE_4) + lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) return status; if (status == LINK_TRAINING_SUCCESS && @@ -1245,17 +1247,21 @@ static void initialize_training_settings( if (overrides->cr_pattern_time != NULL) lt_settings->cr_pattern_time = *overrides->cr_pattern_time; else - lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100); + lt_settings->cr_pattern_time = 100; if (overrides->eq_pattern_time != NULL) lt_settings->eq_pattern_time = *overrides->eq_pattern_time; else - lt_settings->eq_pattern_time = get_training_aux_rd_interval(link, 400); + lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); + if (overrides->pattern_for_cr != NULL) + lt_settings->pattern_for_cr = *overrides->pattern_for_cr; + else + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); if (overrides->pattern_for_eq != NULL) lt_settings->pattern_for_eq = *overrides->pattern_for_eq; else - lt_settings->pattern_for_eq = get_supported_tp(link); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); if (overrides->enhanced_framing != NULL) lt_settings->enhanced_framing = *overrides->enhanced_framing; @@ -1457,7 +1463,6 @@ bool dc_link_dp_perform_link_training_skip_aux( const struct dc_link_settings *link_setting) { struct link_training_settings lt_settings; - enum dc_dp_training_pattern pattern_for_cr = DP_TRAINING_PATTERN_SEQUENCE_1; initialize_training_settings( link, @@ -1468,7 +1473,7 @@ bool dc_link_dp_perform_link_training_skip_aux( /* 1. Perform_clock_recovery_sequence. 
*/ /* transmit training pattern for clock recovery */ - dp_set_hw_training_pattern(link, pattern_for_cr, DPRX); + dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX); /* call HWSS to set lane settings*/ dp_set_hw_lane_settings(link, &lt_settings, DPRX); @@ -1610,6 +1615,9 @@ bool perform_link_training_with_retries( for (j = 0; j < attempts; ++j) { + DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n", + __func__, (unsigned int)j + 1, attempts); + dp_enable_link_phy( link, signal, @@ -1638,6 +1646,9 @@ bool perform_link_training_with_retries( if (j == (attempts - 1)) break; + DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n", + __func__, (unsigned int)j + 1, attempts); + dp_disable_link_phy(link, signal); msleep(delay_between_attempts); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index dd88eb348dfa..81c026319ccd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -104,6 +104,12 @@ void dp_enable_link_phy( struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; unsigned int i; + + if (link->connector_signal == SIGNAL_TYPE_EDP) { + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + } + /* If the current pixel clock source is not DTO(happens after * switching from HDMI passive dongle to DP on the same connector), * switch the pixel clock source to DTO. diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c6b737dd8425..4cea9344d8aa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -782,7 +782,13 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx) calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); - data->recout.x = stream->dst.x; + /* + * Only the leftmost ODM pipe should be offset by a nonzero distance + */ + if (!pipe_ctx->prev_odm_pipe) + data->recout.x = stream->dst.x; + else + data->recout.x = 0; if (stream->src.x < surf_clip.x) data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width / stream->src.width; @@ -957,7 +963,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; const struct dc_stream_state *stream = pipe_ctx->stream; - struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe; + struct pipe_ctx *odm_pipe = pipe_ctx; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect src = pipe_ctx->plane_state->src_rect; int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v; @@ -988,21 +994,24 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) swap(src.width, src.height); } + /*modified recout_skip_h calculation due to odm having no recout offset*/ + while (odm_pipe->prev_odm_pipe) { + odm_idx++; + odm_pipe = odm_pipe->prev_odm_pipe; + } + /*odm_pipe is the leftmost pipe in the ODM group*/ + recout_skip_h = odm_idx * data->recout.width; + /* Recout matching initial vp offset = recout_offset - (stream dst offset + * ((surf dst offset - stream src offset) * 1/ stream scaling ratio) * - (surf surf_src offset * 1/ full scl ratio)) */ - recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x) + recout_skip_h += odm_pipe->plane_res.scl_data.recout.x + - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x) *
stream->dst.width / stream->src.width - src.x * plane_state->dst_rect.width / src.width * stream->dst.width / stream->src.width); - /*modified recout_skip_h calculation due to odm having no recout offset*/ - while (odm_pipe) { - odm_idx++; - odm_pipe = odm_pipe->prev_odm_pipe; - } - if (odm_idx) - recout_skip_h += odm_idx * data->recout.width; + recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y) * stream->dst.height / stream->src.height - diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index f42a17d765e3..d48fd87d3b95 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -123,7 +123,6 @@ static bool dc_stream_construct(struct dc_stream_state *stream, return false; } stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->ctx = stream->ctx; stream->stream_id = stream->ctx->dc_stream_id_count; stream->ctx->dc_stream_id_count++; @@ -298,7 +297,7 @@ bool dc_stream_set_cursor_attributes( #if defined(CONFIG_DRM_AMD_DC_DCN3_0) /* disable idle optimizations while updating cursor */ if (dc->idle_optimizations_allowed) { - dc->hwss.apply_idle_power_optimizations(dc, false); + dc_allow_idle_optimizations(dc, false); reset_idle_optimizations = true; } @@ -326,7 +325,7 @@ bool dc_stream_set_cursor_attributes( #if defined(CONFIG_DRM_AMD_DC_DCN3_0) /* re-enable idle optimizations if necessary */ if (reset_idle_optimizations) - dc->hwss.apply_idle_power_optimizations(dc, true); + dc_allow_idle_optimizations(dc, true); #endif return true; @@ -359,9 +358,8 @@ bool dc_stream_set_cursor_position( #if defined(CONFIG_DRM_AMD_DC_DCN3_0) /* disable idle optimizations if enabling cursor */ - if (dc->idle_optimizations_allowed && - !stream->cursor_position.enable && position->enable) { - dc->hwss.apply_idle_power_optimizations(dc, false); + if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) { + dc_allow_idle_optimizations(dc, false); reset_idle_optimizations = true; } @@ -392,7 +390,7 @@ bool dc_stream_set_cursor_position( #if defined(CONFIG_DRM_AMD_DC_DCN3_0) /* re-enable idle optimizations if necessary */ if (reset_idle_optimizations) - dc->hwss.apply_idle_power_optimizations(dc, true); + dc_allow_idle_optimizations(dc, true); #endif return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index ea1229a3e2b2..3d7d27435f15 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -48,22 +48,17 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl plane_state->in_transfer_func = dc_create_transfer_func(); if (plane_state->in_transfer_func != NULL) { plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - plane_state->in_transfer_func->ctx = ctx; } plane_state->in_shaper_func = dc_create_transfer_func(); if (plane_state->in_shaper_func != NULL) { plane_state->in_shaper_func->type = TF_TYPE_BYPASS; - plane_state->in_shaper_func->ctx = ctx; } plane_state->lut3d_func = dc_create_3dlut_func(); - if (plane_state->lut3d_func != NULL) { - plane_state->lut3d_func->ctx = ctx; - } + plane_state->blend_tf = dc_create_transfer_func(); if (plane_state->blend_tf != NULL) { plane_state->blend_tf->type = TF_TYPE_BYPASS; - plane_state->blend_tf->ctx = ctx; } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h 
index 1d9c8e09c08b..d9b22d6a985a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -42,7 +42,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.99" +#define DC_VER "3.2.102" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -476,7 +476,7 @@ struct dc_debug_options { unsigned int force_odm_combine_4to1; //bit vector based on otg inst #endif unsigned int force_fclk_khz; - bool disable_tri_buf; + bool enable_tri_buf; bool dmub_offload_enabled; bool dmcub_emulation; #if defined(CONFIG_DRM_AMD_DC_DCN3_0) @@ -745,7 +745,6 @@ struct dc_transfer_func { enum dc_transfer_func_predefined tf; /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/ uint32_t sdr_ref_white_level; - struct dc_context *ctx; union { struct pwl_params pwl; struct dc_transfer_func_distributed_points tf_pts; @@ -772,7 +771,6 @@ struct dc_3dlut { struct tetrahedral_params lut_3d; struct fixed31_32 hdr_multiplier; union dc_3dlut_state state; - struct dc_context *ctx; }; /* * This structure is filled in by dc_surface_get_status and contains @@ -1250,6 +1248,9 @@ enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); #if defined(CONFIG_DRM_AMD_DC_DCN3_0) +bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, + struct dc_plane_state *plane); + void dc_allow_idle_optimizations(struct dc *dc, bool allow); /* diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 0811f941f430..e146e3cba8eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -140,6 +140,10 @@ struct dc_vbios_funcs { enum bp_result (*enable_lvtma_control)( struct dc_bios *bios, uint8_t uc_pwr_on); + + enum bp_result (*get_soc_bb_info)( + struct dc_bios *dcb, + struct bp_soc_bb_info *soc_bb_info); }; struct bios_registers { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index eea2429ac67d..b98754811977 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -132,3 +132,19 @@ void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) /* Continue spinning so we don't hang the ASIC. 
*/ } } + +bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, + unsigned int stream_mask) +{ + struct dmub_srv *dmub; + const uint32_t timeout = 30; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dmub = dc_dmub_srv->dmub; + + return dmub_srv_send_gpint_command( + dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK, + stream_mask, timeout) == DMUB_STATUS_OK; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index a3a09ccb6d26..bb4ab61887e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -56,4 +56,6 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); +bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, + unsigned int stream_mask); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index a8a3b0643505..80a2191a3115 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -123,6 +123,7 @@ struct dc_link_training_overrides { uint16_t *cr_pattern_time; uint16_t *eq_pattern_time; + enum dc_dp_training_pattern *pattern_for_cr; enum dc_dp_training_pattern *pattern_for_eq; enum dc_link_spread *downspread; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index aa8e0955db48..c47a19719de2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -122,7 +122,7 @@ struct dc_context { }; -#define DC_MAX_EDID_BUFFER_SIZE 1024 +#define DC_MAX_EDID_BUFFER_SIZE 1280 #define DC_EDID_BLOCK_SIZE 128 #define MAX_SURFACE_NUM 4 #define NUM_PIXEL_FORMATS 10 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 4cdaaf4d881c..5054bb567b74 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -710,7 +710,7 @@ static void dce110_stream_encoder_lvds_set_stream_attribute( ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB); } -static void dce110_stream_encoder_set_mst_bandwidth( +static void dce110_stream_encoder_set_throttled_vcp_size( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp) { @@ -1621,8 +1621,8 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = { dce110_stream_encoder_dvi_set_stream_attribute, .lvds_set_stream_attribute = dce110_stream_encoder_lvds_set_stream_attribute, - .set_mst_bandwidth = - dce110_stream_encoder_set_mst_bandwidth, + .set_throttled_vcp_size = + dce110_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = dce110_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 0603ddca7bd0..1002ce9979dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -810,37 +810,66 @@ void dce110_edp_power_control( if (power_up != link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) { + + unsigned long long current_ts = dm_get_timestamp(ctx); + unsigned long long time_since_edp_poweroff_ms = + div64_u64(dm_get_elapse_time_in_ns( + ctx, + current_ts, + 
link->link_trace.time_stamp.edp_poweroff), 1000000); + unsigned long long time_since_edp_poweron_ms = + div64_u64(dm_get_elapse_time_in_ns( + ctx, + current_ts, + link->link_trace.time_stamp.edp_poweron), 1000000); + DC_LOG_HW_RESUME_S3( + "%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu", + __func__, + power_up, + current_ts, + link->link_trace.time_stamp.edp_poweroff, + link->link_trace.time_stamp.edp_poweron, + time_since_edp_poweroff_ms, + time_since_edp_poweron_ms); + /* Send VBIOS command to prompt eDP panel power */ if (power_up) { - unsigned long long current_ts = dm_get_timestamp(ctx); - unsigned long long duration_in_ms = - div64_u64(dm_get_elapse_time_in_ns( - ctx, - current_ts, - link->link_trace.time_stamp.edp_poweroff), 1000000); - unsigned long long wait_time_ms = 0; - - /* max 500ms from LCDVDD off to on */ - unsigned long long edp_poweroff_time_ms = 500; + /* edp requires a min of 500ms from LCDVDD off to on */ + unsigned long long remaining_min_edp_poweroff_time_ms = 500; + /* add time defined by a patch, if any (usually patch extra_t12_ms is 0) */ if (link->local_sink != NULL) - edp_poweroff_time_ms = - 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms; - if (link->link_trace.time_stamp.edp_poweroff == 0) - wait_time_ms = edp_poweroff_time_ms; - else if (duration_in_ms < edp_poweroff_time_ms) - wait_time_ms = edp_poweroff_time_ms - duration_in_ms; - - if (wait_time_ms) { - msleep(wait_time_ms); - dm_output_to_console("%s: wait %lld ms to power on eDP.\n", - __func__, wait_time_ms); + remaining_min_edp_poweroff_time_ms += + link->local_sink->edid_caps.panel_patch.extra_t12_ms; + + /* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */ + if (link->link_trace.time_stamp.edp_poweroff != 0) { + if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms) + remaining_min_edp_poweroff_time_ms = + remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms; + else + remaining_min_edp_poweroff_time_ms = 0; } + if (remaining_min_edp_poweroff_time_ms) { + DC_LOG_HW_RESUME_S3( + "%s: remaining_min_edp_poweroff_time_ms=%llu: begin wait.\n", + __func__, remaining_min_edp_poweroff_time_ms); + msleep(remaining_min_edp_poweroff_time_ms); + DC_LOG_HW_RESUME_S3( + "%s: remaining_min_edp_poweroff_time_ms=%llu: end wait.\n", + __func__, remaining_min_edp_poweroff_time_ms); + dm_output_to_console("%s: wait %lld ms to power on eDP.\n", + __func__, remaining_min_edp_poweroff_time_ms); + } else { + DC_LOG_HW_RESUME_S3( + "%s: remaining_min_edp_poweroff_time_ms=%llu: no wait required.\n", + __func__, remaining_min_edp_poweroff_time_ms); + } } DC_LOG_HW_RESUME_S3( - "%s: Panel Power action: %s\n", + "%s: BEGIN: Panel Power action: %s\n", __func__, (power_up ? "On":"Off")); cntl.action = power_up ? @@ -864,12 +893,23 @@ void dce110_edp_power_control( bp_result = link_transmitter_control(ctx->dc_bios, &cntl); + DC_LOG_HW_RESUME_S3( + "%s: END: Panel Power action: %s bp_result=%u\n", + __func__, (power_up ? 
"On":"Off"), + bp_result); + if (!power_up) /*save driver power off time stamp*/ link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx); else link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx); + DC_LOG_HW_RESUME_S3( + "%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n", + __func__, + link->link_trace.time_stamp.edp_poweroff, + link->link_trace.time_stamp.edp_poweron); + if (bp_result != BP_RESULT_OK) DC_LOG_ERROR( "%s: Panel Power bp_result: %d\n", diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 47a39eb9400b..7a00fe525dfb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -325,8 +325,6 @@ bool cm_helper_translate_curve_to_hw_format( if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) return false; - PERF_TRACE_CTX(output_tf->ctx); - corner_points = lut_params->corner_points; rgb_resulted = lut_params->rgb_resulted; hw_points = 0; @@ -524,8 +522,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format( if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) return false; - PERF_TRACE_CTX(output_tf->ctx); - corner_points = lut_params->corner_points; rgb_resulted = lut_params->rgb_resulted; hw_points = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index cedf359a00f5..db5615a51fea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -734,6 +734,9 @@ bool hubp1_is_flip_pending(struct hubp *hubp) struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); struct dc_plane_address earliest_inuse_address; + if (hubp && hubp->power_gated) + return false; + REG_GET(DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, &flip_pending); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 8ca94f506195..d0f3bf953d02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2765,7 +2765,7 @@ bool dcn10_disconnect_pipes( struct dc *dc, struct dc_state *context) { - bool found_stream = false; + bool found_pipe = false; int i, j; struct dce_hwseq *hws = dc->hwseq; struct dc_state *old_ctx = dc->current_state; @@ -2805,26 +2805,28 @@ bool dcn10_disconnect_pipes( old_ctx->res_ctx.pipe_ctx[i].top_pipe) { /* Find the top pipe in the new ctx for the bottom pipe that we - * want to remove by comparing the streams. If both pipes are being - * disabled then do it in the regular pipe programming sequence + * want to remove by comparing the streams and planes. 
If both + * pipes are being disabled then do it in the regular pipe + * programming sequence */ for (j = 0; j < dc->res_pool->pipe_count; j++) { if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream && + old_ctx->res_ctx.pipe_ctx[i].top_pipe->plane_state == context->res_ctx.pipe_ctx[j].plane_state && !context->res_ctx.pipe_ctx[j].top_pipe && !context->res_ctx.pipe_ctx[j].update_flags.bits.disable) { - found_stream = true; + found_pipe = true; break; } } // Disconnect if the top pipe lost it's pipe split - if (found_stream && !context->res_ctx.pipe_ctx[j].bottom_pipe) { + if (found_pipe && !context->res_ctx.pipe_ctx[j].bottom_pipe) { hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); mpcc_disconnected = true; } } - found_stream = false; + found_pipe = false; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 842abb4c475b..9cf139be3f40 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -619,7 +619,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute( enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); } -void enc1_stream_encoder_set_mst_bandwidth( +void enc1_stream_encoder_set_throttled_vcp_size( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp) { @@ -1616,8 +1616,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { enc1_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = enc1_stream_encoder_dvi_set_stream_attribute, - .set_mst_bandwidth = - enc1_stream_encoder_set_mst_bandwidth, + .set_throttled_vcp_size = + enc1_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = enc1_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 30eae7459d50..b99d2527cf03 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -588,7 +588,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute( struct dc_crtc_timing *crtc_timing, bool is_dual_link); -void enc1_stream_encoder_set_mst_bandwidth( +void enc1_stream_encoder_set_throttled_vcp_size( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index bb920d0e0b89..368818d2dfc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -908,6 +908,9 @@ bool hubp2_is_flip_pending(struct hubp *hubp) struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); struct dc_plane_address earliest_inuse_address; + if (hubp && hubp->power_gated) + return false; + REG_GET(DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, &flip_pending); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index c8cfd3ba1c15..01530e686f43 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1251,6 +1251,11 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx return; } + /* Detect plane change */ + if 
(old_pipe->plane_state != new_pipe->plane_state) { + new_pipe->update_flags.bits.plane_changed = true; + } + /* Detect top pipe only changes */ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { /* Detect odm changes */ @@ -1392,6 +1397,7 @@ static void dcn20_update_dchubp_dpp( &pipe_ctx->ttu_regs); if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || plane_state->update_flags.bits.bpp_change || plane_state->update_flags.bits.input_csc_change || plane_state->update_flags.bits.color_space_change || @@ -1414,6 +1420,7 @@ static void dcn20_update_dchubp_dpp( } if (pipe_ctx->update_flags.bits.mpcc + || pipe_ctx->update_flags.bits.plane_changed || plane_state->update_flags.bits.global_alpha_change || plane_state->update_flags.bits.per_pixel_alpha_change) { // MPCC inst is equal to pipe index in practice @@ -1515,6 +1522,7 @@ static void dcn20_update_dchubp_dpp( } if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || pipe_ctx->update_flags.bits.opp_changed || plane_state->update_flags.bits.pixel_format_change || plane_state->update_flags.bits.horizontal_mirror_change || @@ -1539,7 +1547,9 @@ static void dcn20_update_dchubp_dpp( hubp->power_gated = false; } - if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.addr_update) hws->funcs.update_plane_addr(dc, pipe_ctx); @@ -1632,16 +1642,26 @@ void dcn20_program_front_end_for_ctx( struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + /* Carry over GSL groups in case the context is changing. 
*/ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe_ctx = + &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == old_pipe_ctx->stream) + pipe_ctx->stream_res.gsl_group = + old_pipe_ctx->stream_res.gsl_group; + } + + if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) { - ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); - if (dc->hwss.program_triplebuffer != NULL && - !dc->debug.disable_tri_buf) { + if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) { + ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( - dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); + dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } } } @@ -1909,9 +1929,9 @@ void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.dsc) { struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; - dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true); + hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true); while (odm_pipe) { - dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true); + hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true); odm_pipe = odm_pipe->next_odm_pipe; } } @@ -1924,9 +1944,9 @@ void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.dsc) { struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; - dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false); + hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false); while (odm_pipe) { - dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false); + hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false); odm_pipe = odm_pipe->next_odm_pipe; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 1b9874445134..18b9465057ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -409,8 +409,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 8.6, - .sr_enter_plus_exit_time_us = 10.9, + .sr_exit_time_us = 11.6, + .sr_enter_plus_exit_time_us = 13.9, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -1075,7 +1075,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = false, - .disable_tri_buf = true, .underflow_assert_delay_us = 0xFFFFFFFF, }; @@ -1092,6 +1091,7 @@ static const struct dc_debug_options debug_defaults_diags = { .disable_stutter = true, .scl_reset_length10 = true, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_tri_buf = true, }; void dcn20_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index e3984f02b7b3..4075ae111530 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -561,8 +561,8 @@ 
static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc1_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = enc1_stream_encoder_dvi_set_stream_attribute, - .set_mst_bandwidth = - enc1_stream_encoder_set_mst_bandwidth, + .set_throttled_vcp_size = + enc1_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = enc2_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 78743ae37851..e73785e74cba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -894,6 +894,8 @@ static const struct dc_debug_options debug_defaults_diags = { .disable_pplib_wm_range = true, .disable_stutter = true, .disable_48mhz_pwrdwn = true, + .disable_psr = true, + .enable_tri_buf = true }; enum dcn20_clk_src_array_id { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index a139a87a1a81..41a1d0e9b7e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -122,8 +122,6 @@ bool cm3_helper_translate_curve_to_hw_format( if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) return false; - PERF_TRACE_CTX(output_tf->ctx); - corner_points = lut_params->corner_points; rgb_resulted = lut_params->rgb_resulted; hw_points = 0; @@ -314,8 +312,6 @@ bool cm3_helper_translate_curve_to_degamma_hw_format( if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) return false; - PERF_TRACE_CTX(output_tf->ctx); - corner_points = lut_params->corner_points; rgb_resulted = lut_params->rgb_resulted; hw_points = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index f5e80a0db72b..6c0f7ef0a3df 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -790,8 +790,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = { enc3_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = enc3_stream_encoder_dvi_set_stream_attribute, - .set_mst_bandwidth = - enc1_stream_encoder_set_mst_bandwidth, + .set_throttled_vcp_size = + enc1_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = enc3_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index a5d750ed569e..204773ffc376 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -35,7 +35,6 @@ #include "dcn30_dpp.h" #include "dcn10/dcn10_cm_common.h" #include "dcn30_cm_common.h" -#include "clk_mgr.h" #include "reg_helper.h" #include "abm.h" #include "clk_mgr.h" @@ -220,15 +219,13 @@ static void dcn30_set_writeback( struct dc_writeback_info *wb_info, struct dc_state *context) { - struct dwbc *dwb; struct mcif_wb *mcif_wb; struct mcif_buf_params *mcif_buf_params; ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES); ASSERT(wb_info->wb_enabled); ASSERT(wb_info->mpcc_inst >= 0); - ASSERT(wb_info->mpcc_inst < 4); - dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + ASSERT(wb_info->mpcc_inst < dc->res_pool->mpcc_count); mcif_wb = 
dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; mcif_buf_params = &wb_info->mcif_buf_params; @@ -692,26 +689,23 @@ void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx) bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) { - unsigned int surface_size; - if (!dc->ctx->dmub_srv) return false; if (enable) { - if (dc->current_state - && dc->current_state->stream_count == 1 // single display only - && dc->current_state->stream_status[0].plane_count == 1 // single surface only - && dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM - // Only 8 and 16 bit formats - && dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F - && dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) { - - surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch * - dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height * - (dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4); - + if (dc->current_state) { + int i; + + /* First, check no-memory-requests case */ + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc->current_state->stream_status[i] + .plane_count) + /* Fail eligibility on a visible stream */ + break; + } } + /* No applicable optimizations */ return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 224c8d145eba..6d13431ff693 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -179,8 +179,7 @@ void optc3_set_dsc_config(struct timing_generator *optc, } - -static void optc3_set_odm_bypass(struct timing_generator *optc, +void optc3_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -277,7 +276,7 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in * * Options: any time, start of frame, dp start of frame (range timing) */ -void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable) +static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable) { struct optc *optc1 = DCN10TG_FROM_TG(optc); uint32_t mode = enable ? 
2 : 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h index 33f13c1e7520..379616831636 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h @@ -339,4 +339,8 @@ void optc3_set_dsc_config(struct timing_generator *optc, void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable); +void optc3_set_odm_bypass(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing); +void optc3_tg_init(struct timing_generator *optc); + #endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 8be4f21169d0..dde87baf1370 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -79,6 +79,7 @@ #include "reg_helper.h" #include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" @@ -832,7 +833,7 @@ static const struct dc_plane_cap plane_cap = { }; static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, + .disable_dmcu = true, //No DMCU on DCN30 .force_abm_enable = false, .timing_trace = false, .clock_trace = true, @@ -849,10 +850,11 @@ static const struct dc_debug_options debug_defaults_drv = { .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, + .disable_psr = false, }; static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, + .disable_dmcu = true, //No dmcu on DCN30 .force_abm_enable = false, .timing_trace = true, .clock_trace = true, @@ -865,6 +867,8 @@ static const struct dc_debug_options debug_defaults_diags = { .scl_reset_length10 = true, .dwb_fi_phase = -1, // -1 = disable .dmub_command_table = true, + .disable_psr = true, + .enable_tri_buf = true, }; void dcn30_dpp_destroy(struct dpp **dpp) @@ -1312,6 +1316,9 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool) dce_abm_destroy(&pool->base.multiple_abms[i]); } + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); } @@ -1821,6 +1828,22 @@ static bool init_soc_bounding_box(struct dc *dc, loaded_ip->max_num_dpp = pool->base.pipe_count; loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; dcn20_patch_bounding_box(dc, loaded_bb); + + if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = {0}; + + if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_0_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10; + + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_0_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10; + + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_0_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10; + } + } + return true; } @@ -2203,6 +2226,9 @@ static void dcn30_calculate_wm( context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->perf_params.stutter_period_us = + context->bw_ctx.dml.vba.StutterPeriod; + for (i = 0, pipe_idx = 0; i < 
dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; @@ -2623,6 +2649,14 @@ static bool dcn30_resource_construct( } } pool->base.timing_generator_count = i; + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + + if (pool->base.psr == NULL) { + dm_error("DC: failed to create PSR obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } /* ABM */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index ae608c329366..3586934df25f 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -30,8 +30,6 @@ * interface to PPLIB/SMU to setup clocks and pstate requirements on SoC */ -typedef bool BOOLEAN; - enum pp_smu_ver { /* * PP_SMU_INTERFACE_X should be interpreted as the interface defined @@ -240,7 +238,7 @@ struct pp_smu_funcs_nv { * DC hardware */ enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp, - BOOLEAN pstate_handshake_supported); + bool pstate_handshake_supported); }; #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 80170f9721ce..860e72a51534 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -2635,15 +2635,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP } if (mode_lib->vba.DRAMClockChangeSupportsVActive && - mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { + mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 && + mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { mode_lib->vba.DRAMClockChangeWatermark += 25; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { - if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { - if (mode_lib->vba.DRAMClockChangeWatermark > - dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark)) - mode_lib->vba.MinTTUVBlank[k] += 25; - } + if (mode_lib->vba.DRAMClockChangeWatermark > + dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark)) + mode_lib->vba.MinTTUVBlank[k] += 25; } mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h index 1e557ddcb638..d0b90947f540 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h @@ -33,7 +33,7 @@ struct display_mode_lib; // Function: dml_rq_dlg_get_rq_reg // Main entry point for test to get the register values out of this DML class. -// This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate +// This function calls <get_rq_param> and <extract_rq_regs> functions to calculate // and then populate the rq_regs struct // Input: // pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.) 
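To make the eDP T12 bookkeeping in the dce110_edp_power_control() hunk earlier in this diff easier to follow, here is a small sketch (not part of the commit) of the remaining-wait arithmetic once a power-off timestamp exists: the driver enforces a 500 ms minimum off time plus any per-panel extra_t12_ms, minus however long the panel has already been off. The helper name example_remaining_t12_ms is hypothetical.

static unsigned long long example_remaining_t12_ms(unsigned long long time_since_poweroff_ms,
		unsigned long long extra_t12_ms)
{
	/* Panel requires at least 500 ms from LCDVDD off to on, plus any patch value */
	unsigned long long min_off_time_ms = 500 + extra_t12_ms;

	/* e.g. extra_t12_ms = 0 and 320 ms already elapsed -> sleep another 180 ms */
	if (time_since_poweroff_ms < min_off_time_ms)
		return min_off_time_ms - time_since_poweroff_ms;

	return 0;
}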
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
index 0d53e871a9d1..27cf8bed9376 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
@@ -33,7 +33,7 @@ struct display_mode_lib;
 
 // Function: dml_rq_dlg_get_rq_reg
 //  Main entry point for test to get the register values out of this DML class.
-//  This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+//  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 //  and then populate the rq_regs struct
 // Input:
 //  pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index a576eed94d9b..367c82b5ab4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -1294,7 +1294,7 @@ static unsigned int CalculateVMAndRowBytes(
 	unsigned int MacroTileHeight;
 	unsigned int ExtraDPDEBytesFrame;
 	unsigned int PDEAndMetaPTEBytesFrame;
-	unsigned int PixelPTEReqHeightPTEs;
+	unsigned int PixelPTEReqHeightPTEs = 0;
 
 	if (DCCEnable == true) {
 		*MetaRequestHeight = 8 * BlockHeight256Bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 2beb284f89b0..50b7d011705d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -597,7 +597,8 @@ static void CalculateStutterEfficiency(
 		double meta_row_bw[],
 		double dpte_row_bw[],
 		double *StutterEfficiencyNotIncludingVBlank,
-		double *StutterEfficiency);
+		double *StutterEfficiency,
+		double *StutterPeriodOut);
 
 static void CalculateSwathAndDETConfiguration(
 		bool ForceSingleDPP,
@@ -3134,7 +3135,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
 			v->meta_row_bw,
 			v->dpte_row_bw,
 			&v->StutterEfficiencyNotIncludingVBlank,
-			&v->StutterEfficiency);
+			&v->StutterEfficiency,
+			&v->StutterPeriod);
 }
 
 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
@@ -3235,7 +3237,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
 		*BytePerPixelDETC = 0;
 		*BytePerPixelY = 4;
 		*BytePerPixelC = 0;
-	} else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+	} else if (SourcePixelFormat == dm_444_16) {
 		*BytePerPixelDETY = 2;
 		*BytePerPixelDETC = 0;
 		*BytePerPixelY = 2;
@@ -5305,7 +5307,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 			ViewportExceedsSurface = true;
 
 		if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
-				&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+				&& v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
 			if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
 				ViewportExceedsSurface = true;
 			}
@@ -5515,7 +5517,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 			if (WritebackPixelFormat[k] == dm_444_64) {
 				WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
 			}
-			if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave || mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+			if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
 				WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
 			}
 			WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
@@ -6151,7 +6153,8 @@ static void CalculateStutterEfficiency(
 		double meta_row_bw[],
 		double dpte_row_bw[],
 		double *StutterEfficiencyNotIncludingVBlank,
-		double *StutterEfficiency)
+		double *StutterEfficiency,
+		double *StutterPeriodOut)
 {
 	double FullDETBufferingTimeY[DC__NUM_DPP__MAX] = { 0 };
 	double FrameTimeForMinFullDETBufferingTime = 0;
@@ -6262,6 +6265,9 @@ static void CalculateStutterEfficiency(
 	}
 
 	*StutterEfficiency = (*StutterEfficiencyNotIncludingVBlank / 100.0 * (FrameTimeForMinFullDETBufferingTime - SmallestVBlank) + SmallestVBlank) / FrameTimeForMinFullDETBufferingTime * 100;
+
+	if (StutterPeriodOut)
+		*StutterPeriodOut = StutterPeriod;
 }
 
 static void CalculateSwathAndDETConfiguration(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 5bb10f6e300d..416bf6fb67bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -279,7 +279,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
 		*BytePerPixelDETC = 0;
 		*BytePerPixelY = 4;
 		*BytePerPixelC = 0;
-	} else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+	} else if (SourcePixelFormat == dm_444_16) {
 		*BytePerPixelDETY = 2;
 		*BytePerPixelDETC = 0;
 		*BytePerPixelY = 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
index e5b17e1104c6..c04965cceff3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
@@ -32,7 +32,7 @@ struct display_mode_lib;
 
 // Function: dml_rq_dlg_get_rq_reg
 //  Main entry point for test to get the register values out of this DML class.
-//  This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+//  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 //  and then populate the rq_regs struct
 // Input:
 //  pipe_param - pipe source configuration (e.g. vp, pitch, scaling, dest, etc.)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
index cf98aa827a9a..e883864cff3c 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
@@ -162,7 +162,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
index b38c96c9fed3..7d36b56346a6 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
@@ -194,7 +194,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = dal_hw_generic_init,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
index 83f798cb8b21..9b63c6c0cc84 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
@@ -221,7 +221,7 @@ static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
 	generic->base.regs = &generic_regs[en].gpio;
 }
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = dal_hw_generic_init,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
index 907c5911eb9e..2f57ee6deabc 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
@@ -202,7 +202,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = dal_hw_generic_init,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
index 7e7fb6572107..3be2c90b0c61 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
@@ -218,7 +218,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = dal_hw_generic_init,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 329395ee7461..1daa563c8ff4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -300,6 +300,7 @@ union pipe_update_flags {
 		uint32_t gamut_remap : 1;
 		uint32_t scaler : 1;
 		uint32_t viewport : 1;
+		uint32_t plane_changed : 1;
 	} bits;
 	uint32_t raw;
 };
@@ -396,6 +397,7 @@ struct dc_state {
 	struct dc_stream_state *streams[MAX_PIPES];
 	struct dc_stream_status stream_status[MAX_PIPES];
 	uint8_t stream_count;
+	uint8_t stream_mask;
 
 	struct resource_context res_ctx;
 
@@ -410,6 +412,10 @@ struct dc_state {
 	struct clk_mgr *clk_mgr;
 
 	struct kref refcount;
+
+	struct {
+		unsigned int stutter_period_us;
+	} perf_params;
 };
 
 #endif /* _CORE_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 11ce06e69d3f..0184cefb083b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -143,7 +143,7 @@ struct stream_encoder_funcs {
 		struct stream_encoder *enc,
 		struct dc_crtc_timing *crtc_timing);
 
-	void (*set_mst_bandwidth)(
+	void (*set_throttled_vcp_size)(
 		struct stream_encoder *enc,
 		struct fixed31_32 avg_time_slots_per_mtp);
 
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index 944c0327763c..f0a0d419e555 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -46,9 +46,10 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
 	struct dc_crtc_timing *crtc_timing,
 	bool is_dual_link) {}
 
-static void virtual_stream_encoder_set_mst_bandwidth(
+static void virtual_stream_encoder_set_throttled_vcp_size(
 	struct stream_encoder *enc,
-	struct fixed31_32 avg_time_slots_per_mtp) {}
+	struct fixed31_32 avg_time_slots_per_mtp)
+{}
 
 static void virtual_stream_encoder_update_hdmi_info_packets(
 	struct stream_encoder *enc,
@@ -107,8 +108,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
 		virtual_stream_encoder_hdmi_set_stream_attribute,
 	.dvi_set_stream_attribute =
 		virtual_stream_encoder_dvi_set_stream_attribute,
-	.set_mst_bandwidth =
-		virtual_stream_encoder_set_mst_bandwidth,
+	.set_throttled_vcp_size =
+		virtual_stream_encoder_set_throttled_vcp_size,
 	.update_hdmi_info_packets =
 		virtual_stream_encoder_update_hdmi_info_packets,
 	.stop_hdmi_info_packets =