This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new e3047d265ee [fix](filecache) fix crash on reset_range when clearing 
cache concurrently (#60271)
e3047d265ee is described below

commit e3047d265ee58eb1716b863b0eea39c75f54a8f2
Author: zhengyu <[email protected]>
AuthorDate: Tue Feb 3 17:37:44 2026 +0800

    [fix](filecache) fix crash on reset_range when clearing cache concurrently 
(#60271)
    
    reset_range could dereference a null cell when a block was
    evicted/removed while a downloader thread was finalizing a partial
    block. The FileBlock stayed alive via its refcount, but its
    FileBlockCell had already been erased from _files (during a
    clear_cache operation), leading to a SIGSEGV.
---
 be/src/io/cache/block_file_cache.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/be/src/io/cache/block_file_cache.cpp 
b/be/src/io/cache/block_file_cache.cpp
index 0aa4757c089..cc003237fe3 100644
--- a/be/src/io/cache/block_file_cache.cpp
+++ b/be/src/io/cache/block_file_cache.cpp
@@ -1227,6 +1227,12 @@ void BlockFileCache::reset_range(const UInt128Wrapper& 
hash, size_t offset, size
            _files.find(hash)->second.find(offset) != 
_files.find(hash)->second.end());
     FileBlockCell* cell = get_cell(hash, offset, cache_lock);
     DCHECK(cell != nullptr);
+    if (cell == nullptr) {
+        LOG(WARNING) << "reset_range skipped because cache cell is missing. 
hash="
+                     << hash.to_string() << " offset=" << offset << " 
old_size=" << old_size
+                     << " new_size=" << new_size;
+        return;
+    }
     if (cell->queue_iterator) {
         auto& queue = get_queue(cell->file_block->cache_type());
         DCHECK(queue.contains(hash, offset, cache_lock));


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to