Hello Yang Li,

This is a semi-automatic email about new static checker warnings.
The patch a689e8d1f800: "drm/amd/display: check top_pipe_to_program
pointer" from Nov 15, 2021, leads to the following Smatch complaint:

drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:3064 commit_planes_for_stream()
	error: we previously assumed 'top_pipe_to_program' could be null (see line 2887)

drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c
  2822 static void commit_planes_for_stream(struct dc *dc,
  2823                 struct dc_surface_update *srf_updates,
  2824                 int surface_count,
  2825                 struct dc_stream_state *stream,
  2826                 struct dc_stream_update *stream_update,
  2827                 enum surface_update_type update_type,
  2828                 struct dc_state *context)
  2829 {
  2830         int i, j;
  2831         struct pipe_ctx *top_pipe_to_program = NULL;
                                ^^^^^^^^^^^^^^^^^^^^^^^^^^
Set to NULL here.

  2832         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
  2833 
  2834 #if defined(CONFIG_DRM_AMD_DC_DCN)
  2835         dc_z10_restore(dc);
  2836 #endif
  2837 
  2838         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
  2839                 /* Optimize seamless boot flag keeps clocks and watermarks high until
  2840                  * first flip. After first flip, optimization is required to lower
  2841                  * bandwidth. Important to note that it is expected UEFI will
  2842                  * only light up a single display on POST, therefore we only expect
  2843                  * one stream with seamless boot flag set.
  2844                  */
  2845                 if (stream->apply_seamless_boot_optimization) {
  2846                         stream->apply_seamless_boot_optimization = false;
  2847 
  2848                         if (get_seamless_boot_stream_count(context) == 0)
  2849                                 dc->optimized_required = true;
  2850                 }
  2851         }
  2852 
  2853         if (update_type == UPDATE_TYPE_FULL) {
  2854 #if defined(CONFIG_DRM_AMD_DC_DCN)
  2855                 dc_allow_idle_optimizations(dc, false);
  2856 
  2857 #endif
  2858                 if (get_seamless_boot_stream_count(context) == 0)
  2859                         dc->hwss.prepare_bandwidth(dc, context);
  2860 
  2861                 context_clock_trace(dc, context);
  2862         }
  2863 
  2864         for (j = 0; j < dc->res_pool->pipe_count; j++) {
  2865                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
  2866 
  2867                 if (!pipe_ctx->top_pipe &&
  2868                         !pipe_ctx->prev_odm_pipe &&
  2869                         pipe_ctx->stream &&
  2870                         pipe_ctx->stream == stream) {
  2871                         top_pipe_to_program = pipe_ctx;
                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set to non-NULL here.

  2872                 }
  2873         }
  2874 
  2875 #ifdef CONFIG_DRM_AMD_DC_DCN
  2876         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
  2877                 struct pipe_ctx *mpcc_pipe;
  2878                 struct pipe_ctx *odm_pipe;
  2879 
  2880                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
  2881                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
  2882                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
  2883         }
  2884 #endif
  2885 
  2886         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
  2887                 if (top_pipe_to_program &&
                           ^^^^^^^^^^^^^^^^^^^
The patch adds a new NULL check to make clang happy.

  2888                     top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
  2889                         if (should_use_dmub_lock(stream->link)) {
  2890                                 union dmub_hw_lock_flags hw_locks = { 0 };
  2891                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
  2892 
  2893                                 hw_locks.bits.lock_dig = 1;
  2894                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
  2895 
  2896                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
  2897                                                         true,
  2898                                                         &hw_locks,
  2899                                                         &inst_flags);
  2900                         } else
  2901                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
  2902                                                 top_pipe_to_program->stream_res.tg);
  2903                 }
  2904 
  2905         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
  2906                 dc->hwss.interdependent_update_lock(dc, context, true);
  2907         else
  2908                 /* Lock the top pipe while updating plane addrs, since freesync requires
  2909                  * plane addr update event triggers to be synchronized.
  2910                  * top_pipe_to_program is expected to never be NULL
                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This comment says that "top_pipe_to_program" is expected to never be
NULL but it's unclear if it means just for this else statement or for
the whole function or what?

  2911                  */
  2912                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
  2913 
  2914         // Stream updates
  2915         if (stream_update)
  2916                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
  2917 
  2918         if (surface_count == 0) {
  2919                 /*
  2920                  * In case of turning off screen, no need to program front end a second time.
  2921                  * just return after program blank.
  2922                  */
  2923                 if (dc->hwss.apply_ctx_for_surface)
  2924                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
  2925                 if (dc->hwss.program_front_end_for_ctx)
  2926                         dc->hwss.program_front_end_for_ctx(dc, context);
  2927 
  2928                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
  2929                         dc->hwss.interdependent_update_lock(dc, context, false);
  2930                 else
  2931                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
  2932                 dc->hwss.post_unlock_program_front_end(dc, context);
  2933                 return;
  2934         }
  2935 
  2936         if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
  2937                 for (i = 0; i < surface_count; i++) {
  2938                         struct dc_plane_state *plane_state = srf_updates[i].surface;
  2939                         /*set logical flag for lock/unlock use*/
  2940                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
  2941                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
  2942                                 if (!pipe_ctx->plane_state)
  2943                                         continue;
  2944                                 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
  2945                                         continue;
  2946                                 pipe_ctx->plane_state->triplebuffer_flips = false;
  2947                                 if (update_type == UPDATE_TYPE_FAST &&
  2948                                         dc->hwss.program_triplebuffer != NULL &&
  2949                                         !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
  2950                                         /*triple buffer for VUpdate only*/
  2951                                         pipe_ctx->plane_state->triplebuffer_flips = true;
  2952                                 }
  2953                         }
  2954                         if (update_type == UPDATE_TYPE_FULL) {
  2955                                 /* force vsync flip when reconfiguring pipes to prevent underflow */
  2956                                 plane_state->flip_immediate = false;
  2957                         }
  2958                 }
  2959         }
  2960 
  2961         // Update Type FULL, Surface updates
  2962         for (j = 0; j < dc->res_pool->pipe_count; j++) {
  2963                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
  2964 
  2965                 if (!pipe_ctx->top_pipe &&
  2966                         !pipe_ctx->prev_odm_pipe &&
  2967                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
  2968                         struct dc_stream_status *stream_status = NULL;
  2969 
  2970                         if (!pipe_ctx->plane_state)
  2971                                 continue;
  2972 
  2973                         /* Full fe update*/
  2974                         if (update_type == UPDATE_TYPE_FAST)
  2975                                 continue;
  2976 
  2977                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
  2978 
  2979                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
  2980                                 /*turn off triple buffer for full update*/
  2981                                 dc->hwss.program_triplebuffer(
  2982                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
  2983                         }
  2984                         stream_status =
  2985                                 stream_get_status(context, pipe_ctx->stream);
  2986 
  2987                         if (dc->hwss.apply_ctx_for_surface)
  2988                                 dc->hwss.apply_ctx_for_surface(
  2989                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
  2990                 }
  2991         }
  2992         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
  2993                 dc->hwss.program_front_end_for_ctx(dc, context);
  2994 #ifdef CONFIG_DRM_AMD_DC_DCN
  2995                 if (dc->debug.validate_dml_output) {
  2996                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
  2997                                 struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
  2998                                 if (cur_pipe.stream == NULL)
  2999                                         continue;
  3000 
  3001                                 cur_pipe.plane_res.hubp->funcs->validate_dml_output(
  3002                                                 cur_pipe.plane_res.hubp, dc->ctx,
  3003                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
  3004                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
  3005                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
  3006                         }
  3007                 }
  3008 #endif
  3009         }
  3010 
  3011         // Update Type FAST, Surface updates
  3012         if (update_type == UPDATE_TYPE_FAST) {
  3013                 if (dc->hwss.set_flip_control_gsl)
  3014                         for (i = 0; i < surface_count; i++) {
  3015                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
  3016 
  3017                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
  3018                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
  3019 
  3020                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
  3021                                                 continue;
  3022 
  3023                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
  3024                                                 continue;
  3025 
  3026                                         // GSL has to be used for flip immediate
  3027                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
  3028                                                         pipe_ctx->plane_state->flip_immediate);
  3029                                 }
  3030                         }
  3031 
  3032                 /* Perform requested Updates */
  3033                 for (i = 0; i < surface_count; i++) {
  3034                         struct dc_plane_state *plane_state = srf_updates[i].surface;
  3035 
  3036                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
  3037                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
  3038 
  3039                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
  3040                                         continue;
  3041 
  3042                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
  3043                                         continue;
  3044 
  3045                                 /*program triple buffer after lock based on flip type*/
  3046                                 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
  3047                                         /*only enable triplebuffer for fast_update*/
  3048                                         dc->hwss.program_triplebuffer(
  3049                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
  3050                                 }
  3051                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
  3052                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
  3053                         }
  3054                 }
  3055 
  3056         }
  3057 
  3058         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
  3059                 dc->hwss.interdependent_update_lock(dc, context, false);
  3060         else
  3061                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
  3062 
  3063         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This condition is exactly the same as the one where we added a NULL
check at the start of the function.

  3064                 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Unchecked dereference.  Ideally someone would know if
"top_pipe_to_program" can really be NULL or not.  Adding NULL checks
will make the static checkers happy but it isn't necessarily the
correct fix.
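
To make it concrete, here is a throwaway toy program (made-up names,
obviously not the driver code) with the same shape the checker is
complaining about:

#include <stddef.h>
#include <stdio.h>

struct pipe { int inst; };

static void commit(struct pipe *match, int dsc_changed)
{
        struct pipe *top = NULL;

        if (match)                      /* stands in for the pipe_ctx loop; it may find nothing */
                top = match;

        if (dsc_changed)
                if (top && top->inst)   /* like line 2887: NULL checked */
                        printf("lock %d\n", top->inst);

        /* ... the rest of the function ... */

        if (dsc_changed)
                if (top->inst)          /* like line 3064: same condition, no check */
                        printf("unlock %d\n", top->inst);
}

int main(void)
{
        commit(NULL, 1);                /* the "no matching pipe" case */
        return 0;
}

Either the "no matching pipe" case cannot really happen, in which case
the new check at line 2887 is dead code, or it can happen, in which case
the dereference at line 3064 presumably needs the same guard.
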
  3065                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
  3066                                 top_pipe_to_program->stream_res.tg,

regards,
dan carpenter